diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index 9cec6278abc2c..7800688542160 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -238,60 +238,6 @@ static SDValue selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, return selectImmSeq(CurDAG, DL, VT, Seq); } -static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef Regs, - unsigned NF, RISCVII::VLMUL LMUL) { - static const unsigned M1TupleRegClassIDs[] = { - RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID, - RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID, - RISCV::VRN8M1RegClassID}; - static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID, - RISCV::VRN3M2RegClassID, - RISCV::VRN4M2RegClassID}; - - assert(Regs.size() >= 2 && Regs.size() <= 8); - - unsigned RegClassID; - unsigned SubReg0; - switch (LMUL) { - default: - llvm_unreachable("Invalid LMUL."); - case RISCVII::VLMUL::LMUL_F8: - case RISCVII::VLMUL::LMUL_F4: - case RISCVII::VLMUL::LMUL_F2: - case RISCVII::VLMUL::LMUL_1: - static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7, - "Unexpected subreg numbering"); - SubReg0 = RISCV::sub_vrm1_0; - RegClassID = M1TupleRegClassIDs[NF - 2]; - break; - case RISCVII::VLMUL::LMUL_2: - static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3, - "Unexpected subreg numbering"); - SubReg0 = RISCV::sub_vrm2_0; - RegClassID = M2TupleRegClassIDs[NF - 2]; - break; - case RISCVII::VLMUL::LMUL_4: - static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1, - "Unexpected subreg numbering"); - SubReg0 = RISCV::sub_vrm4_0; - RegClassID = RISCV::VRN2M4RegClassID; - break; - } - - SDLoc DL(Regs[0]); - SmallVector Ops; - - Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32)); - - for (unsigned I = 0; I < Regs.size(); ++I) { - Ops.push_back(Regs[I]); - Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32)); - } - SDNode *N = - CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops); - return SDValue(N, 0); -} - void RISCVDAGToDAGISel::addVectorLoadStoreOperands( SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl &Operands, @@ -338,22 +284,17 @@ void RISCVDAGToDAGISel::addVectorLoadStoreOperands( Operands.push_back(Glue); } -void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked, +void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsStrided) { SDLoc DL(Node); - unsigned NF = Node->getNumValues() - 1; MVT VT = Node->getSimpleValueType(0); - unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits()); + unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1); RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); unsigned CurOp = 2; SmallVector Operands; - SmallVector Regs(Node->op_begin() + CurOp, - Node->op_begin() + CurOp + NF); - SDValue Merge = createTuple(*CurDAG, Regs, NF, LMUL); - Operands.push_back(Merge); - CurOp += NF; + Operands.push_back(Node->getOperand(CurOp++)); addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided, Operands, /*IsLoad=*/true); @@ -367,33 +308,23 @@ void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked, if (auto *MemOp = dyn_cast(Node)) CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()}); - SDValue SuperReg = SDValue(Load, 0); - for (unsigned I = 0; I < NF; ++I) { - unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I); - 
ReplaceUses(SDValue(Node, I), - CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg)); - } - - ReplaceUses(SDValue(Node, NF), SDValue(Load, 1)); + ReplaceUses(SDValue(Node, 0), SDValue(Load, 0)); + ReplaceUses(SDValue(Node, 1), SDValue(Load, 1)); CurDAG->RemoveDeadNode(Node); } -void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) { +void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, unsigned NF, + bool IsMasked) { SDLoc DL(Node); - unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain. MVT VT = Node->getSimpleValueType(0); MVT XLenVT = Subtarget->getXLenVT(); - unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits()); + unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1); RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); unsigned CurOp = 2; SmallVector Operands; - SmallVector Regs(Node->op_begin() + CurOp, - Node->op_begin() + CurOp + NF); - SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL); - Operands.push_back(MaskedOff); - CurOp += NF; + Operands.push_back(Node->getOperand(CurOp++)); addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, /*IsStridedOrIndexed*/ false, Operands, @@ -408,42 +339,40 @@ void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) { if (auto *MemOp = dyn_cast(Node)) CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()}); - SDValue SuperReg = SDValue(Load, 0); - for (unsigned I = 0; I < NF; ++I) { - unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I); - ReplaceUses(SDValue(Node, I), - CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg)); - } - - ReplaceUses(SDValue(Node, NF), SDValue(Load, 1)); // VL - ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Chain + ReplaceUses(SDValue(Node, 0), SDValue(Load, 0)); // Result + ReplaceUses(SDValue(Node, 1), SDValue(Load, 1)); // VL + ReplaceUses(SDValue(Node, 2), SDValue(Load, 2)); // Chain CurDAG->RemoveDeadNode(Node); } -void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked, +void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered) { SDLoc DL(Node); - unsigned NF = Node->getNumValues() - 1; MVT VT = Node->getSimpleValueType(0); - unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits()); + unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1); RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); unsigned CurOp = 2; SmallVector Operands; - SmallVector Regs(Node->op_begin() + CurOp, - Node->op_begin() + CurOp + NF); - SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL); - Operands.push_back(MaskedOff); - CurOp += NF; + Operands.push_back(Node->getOperand(CurOp++)); MVT IndexVT; addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, /*IsStridedOrIndexed*/ true, Operands, /*IsLoad=*/true, &IndexVT); - assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && +#ifndef NDEBUG + // Number of element = RVVBitsPerBlock * LMUL / SEW + unsigned ContainedTyNumElts = RISCV::RVVBitsPerBlock >> Log2SEW; + auto DecodedLMUL = RISCVVType::decodeVLMUL(LMUL); + if (DecodedLMUL.second) + ContainedTyNumElts /= DecodedLMUL.first; + else + ContainedTyNumElts *= DecodedLMUL.first; + assert(ContainedTyNumElts == IndexVT.getVectorMinNumElements() && "Element count mismatch"); +#endif RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT); unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits()); @@ -460,34 +389,22 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked, if (auto *MemOp = dyn_cast(Node)) 
CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()}); - SDValue SuperReg = SDValue(Load, 0); - for (unsigned I = 0; I < NF; ++I) { - unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I); - ReplaceUses(SDValue(Node, I), - CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg)); - } - - ReplaceUses(SDValue(Node, NF), SDValue(Load, 1)); + ReplaceUses(SDValue(Node, 0), SDValue(Load, 0)); + ReplaceUses(SDValue(Node, 1), SDValue(Load, 1)); CurDAG->RemoveDeadNode(Node); } -void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked, +void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsStrided) { SDLoc DL(Node); - unsigned NF = Node->getNumOperands() - 4; - if (IsStrided) - NF--; - if (IsMasked) - NF--; MVT VT = Node->getOperand(2)->getSimpleValueType(0); - unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits()); + unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1); RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); - SmallVector Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF); - SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL); + unsigned CurOp = 2; SmallVector Operands; - Operands.push_back(StoreVal); - unsigned CurOp = 2 + NF; + + Operands.push_back(Node->getOperand(CurOp++)); addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided, Operands); @@ -503,29 +420,34 @@ void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked, ReplaceNode(Node, Store); } -void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked, +void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered) { SDLoc DL(Node); - unsigned NF = Node->getNumOperands() - 5; - if (IsMasked) - --NF; MVT VT = Node->getOperand(2)->getSimpleValueType(0); - unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits()); + unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1); RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); - SmallVector Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF); - SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL); + unsigned CurOp = 2; SmallVector Operands; - Operands.push_back(StoreVal); - unsigned CurOp = 2 + NF; + + Operands.push_back(Node->getOperand(CurOp++)); MVT IndexVT; addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, /*IsStridedOrIndexed*/ true, Operands, /*IsLoad=*/false, &IndexVT); - assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && +#ifndef NDEBUG + // Number of element = RVVBitsPerBlock * LMUL / SEW + unsigned ContainedTyNumElts = RISCV::RVVBitsPerBlock >> Log2SEW; + auto DecodedLMUL = RISCVVType::decodeVLMUL(LMUL); + if (DecodedLMUL.second) + ContainedTyNumElts /= DecodedLMUL.first; + else + ContainedTyNumElts *= DecodedLMUL.first; + assert(ContainedTyNumElts == IndexVT.getVectorMinNumElements() && "Element count mismatch"); +#endif RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT); unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits()); @@ -877,6 +799,48 @@ void RISCVDAGToDAGISel::selectSF_VC_X_SE(SDNode *Node) { Opcode, DL, Node->getSimpleValueType(0), Operands)); } +static unsigned getSegInstNF(unsigned Intrinsic) { +#define INST_NF_CASE(NAME, NF) \ + case Intrinsic::riscv_##NAME##NF: \ + return NF; +#define INST_NF_CASE_MASK(NAME, NF) \ + case Intrinsic::riscv_##NAME##NF##_mask: \ + return NF; +#define INST_NF_CASE_FF(NAME, NF) \ + case Intrinsic::riscv_##NAME##NF##ff: \ + return NF; +#define INST_NF_CASE_FF_MASK(NAME, NF) \ + case 
Intrinsic::riscv_##NAME##NF##ff_mask: \ + return NF; +#define INST_ALL_NF_CASE_BASE(MACRO_NAME, NAME) \ + MACRO_NAME(NAME, 2) \ + MACRO_NAME(NAME, 3) \ + MACRO_NAME(NAME, 4) \ + MACRO_NAME(NAME, 5) \ + MACRO_NAME(NAME, 6) \ + MACRO_NAME(NAME, 7) \ + MACRO_NAME(NAME, 8) +#define INST_ALL_NF_CASE(NAME) \ + INST_ALL_NF_CASE_BASE(INST_NF_CASE, NAME) \ + INST_ALL_NF_CASE_BASE(INST_NF_CASE_MASK, NAME) +#define INST_ALL_NF_CASE_WITH_FF(NAME) \ + INST_ALL_NF_CASE(NAME) \ + INST_ALL_NF_CASE_BASE(INST_NF_CASE_FF, NAME) \ + INST_ALL_NF_CASE_BASE(INST_NF_CASE_FF_MASK, NAME) + switch (Intrinsic) { + default: + llvm_unreachable("Unexpected segment load/store intrinsic"); + INST_ALL_NF_CASE_WITH_FF(vlseg) + INST_ALL_NF_CASE(vlsseg) + INST_ALL_NF_CASE(vloxseg) + INST_ALL_NF_CASE(vluxseg) + INST_ALL_NF_CASE(vsseg) + INST_ALL_NF_CASE(vssseg) + INST_ALL_NF_CASE(vsoxseg) + INST_ALL_NF_CASE(vsuxseg) + } +} + void RISCVDAGToDAGISel::Select(SDNode *Node) { // If we have a custom node, we have already selected. if (Node->isMachineOpcode()) { @@ -1863,7 +1827,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vlseg6: case Intrinsic::riscv_vlseg7: case Intrinsic::riscv_vlseg8: { - selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false); + selectVLSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false, + /*IsStrided*/ false); return; } case Intrinsic::riscv_vlseg2_mask: @@ -1873,7 +1838,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vlseg6_mask: case Intrinsic::riscv_vlseg7_mask: case Intrinsic::riscv_vlseg8_mask: { - selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false); + selectVLSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true, + /*IsStrided*/ false); return; } case Intrinsic::riscv_vlsseg2: @@ -1883,7 +1849,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vlsseg6: case Intrinsic::riscv_vlsseg7: case Intrinsic::riscv_vlsseg8: { - selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true); + selectVLSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false, + /*IsStrided*/ true); return; } case Intrinsic::riscv_vlsseg2_mask: @@ -1893,7 +1860,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vlsseg6_mask: case Intrinsic::riscv_vlsseg7_mask: case Intrinsic::riscv_vlsseg8_mask: { - selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true); + selectVLSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true, + /*IsStrided*/ true); return; } case Intrinsic::riscv_vloxseg2: @@ -1903,7 +1871,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vloxseg6: case Intrinsic::riscv_vloxseg7: case Intrinsic::riscv_vloxseg8: - selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true); + selectVLXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false, + /*IsOrdered*/ true); return; case Intrinsic::riscv_vluxseg2: case Intrinsic::riscv_vluxseg3: @@ -1912,7 +1881,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vluxseg6: case Intrinsic::riscv_vluxseg7: case Intrinsic::riscv_vluxseg8: - selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false); + selectVLXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false, + /*IsOrdered*/ false); return; case Intrinsic::riscv_vloxseg2_mask: case Intrinsic::riscv_vloxseg3_mask: @@ -1921,7 +1891,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vloxseg6_mask: case Intrinsic::riscv_vloxseg7_mask: case Intrinsic::riscv_vloxseg8_mask: - selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true); + selectVLXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true, + 
/*IsOrdered*/ true); return; case Intrinsic::riscv_vluxseg2_mask: case Intrinsic::riscv_vluxseg3_mask: @@ -1930,7 +1901,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vluxseg6_mask: case Intrinsic::riscv_vluxseg7_mask: case Intrinsic::riscv_vluxseg8_mask: - selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false); + selectVLXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true, + /*IsOrdered*/ false); return; case Intrinsic::riscv_vlseg8ff: case Intrinsic::riscv_vlseg7ff: @@ -1939,7 +1911,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vlseg4ff: case Intrinsic::riscv_vlseg3ff: case Intrinsic::riscv_vlseg2ff: { - selectVLSEGFF(Node, /*IsMasked*/ false); + selectVLSEGFF(Node, getSegInstNF(IntNo), /*IsMasked*/ false); return; } case Intrinsic::riscv_vlseg8ff_mask: @@ -1949,7 +1921,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vlseg4ff_mask: case Intrinsic::riscv_vlseg3ff_mask: case Intrinsic::riscv_vlseg2ff_mask: { - selectVLSEGFF(Node, /*IsMasked*/ true); + selectVLSEGFF(Node, getSegInstNF(IntNo), /*IsMasked*/ true); return; } case Intrinsic::riscv_vloxei: @@ -2081,7 +2053,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vsseg6: case Intrinsic::riscv_vsseg7: case Intrinsic::riscv_vsseg8: { - selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false); + selectVSSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false, + /*IsStrided*/ false); return; } case Intrinsic::riscv_vsseg2_mask: @@ -2091,7 +2064,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vsseg6_mask: case Intrinsic::riscv_vsseg7_mask: case Intrinsic::riscv_vsseg8_mask: { - selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false); + selectVSSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true, + /*IsStrided*/ false); return; } case Intrinsic::riscv_vssseg2: @@ -2101,7 +2075,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vssseg6: case Intrinsic::riscv_vssseg7: case Intrinsic::riscv_vssseg8: { - selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true); + selectVSSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false, + /*IsStrided*/ true); return; } case Intrinsic::riscv_vssseg2_mask: @@ -2111,7 +2086,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vssseg6_mask: case Intrinsic::riscv_vssseg7_mask: case Intrinsic::riscv_vssseg8_mask: { - selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true); + selectVSSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true, + /*IsStrided*/ true); return; } case Intrinsic::riscv_vsoxseg2: @@ -2121,7 +2097,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vsoxseg6: case Intrinsic::riscv_vsoxseg7: case Intrinsic::riscv_vsoxseg8: - selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true); + selectVSXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false, + /*IsOrdered*/ true); return; case Intrinsic::riscv_vsuxseg2: case Intrinsic::riscv_vsuxseg3: @@ -2130,7 +2107,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vsuxseg6: case Intrinsic::riscv_vsuxseg7: case Intrinsic::riscv_vsuxseg8: - selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false); + selectVSXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false, + /*IsOrdered*/ false); return; case Intrinsic::riscv_vsoxseg2_mask: case Intrinsic::riscv_vsoxseg3_mask: @@ -2139,7 +2117,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vsoxseg6_mask: case Intrinsic::riscv_vsoxseg7_mask: case Intrinsic::riscv_vsoxseg8_mask: - 
selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true); + selectVSXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true, + /*IsOrdered*/ true); return; case Intrinsic::riscv_vsuxseg2_mask: case Intrinsic::riscv_vsuxseg3_mask: @@ -2148,7 +2127,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { case Intrinsic::riscv_vsuxseg6_mask: case Intrinsic::riscv_vsuxseg7_mask: case Intrinsic::riscv_vsuxseg8_mask: - selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false); + selectVSXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true, + /*IsOrdered*/ false); return; case Intrinsic::riscv_vsoxei: case Intrinsic::riscv_vsoxei_mask: @@ -2243,7 +2223,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { } break; } - case ISD::INSERT_SUBVECTOR: { + case ISD::INSERT_SUBVECTOR: + case RISCVISD::TUPLE_INSERT: { SDValue V = Node->getOperand(0); SDValue SubV = Node->getOperand(1); SDLoc DL(SubV); @@ -2284,7 +2265,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { SubVecLMUL == RISCVII::VLMUL::LMUL_F2 || SubVecLMUL == RISCVII::VLMUL::LMUL_F4 || SubVecLMUL == RISCVII::VLMUL::LMUL_F8; - assert((!IsSubVecPartReg || V.isUndef()) && + assert((V.getValueType().isRISCVVectorTuple() || !IsSubVecPartReg || + V.isUndef()) && "Expecting lowering to have created legal INSERT_SUBVECTORs when " "the subvector is smaller than a full-sized register"); @@ -2307,7 +2289,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { ReplaceNode(Node, Insert.getNode()); return; } - case ISD::EXTRACT_SUBVECTOR: { + case ISD::EXTRACT_SUBVECTOR: + case RISCVISD::TUPLE_EXTRACT: { SDValue V = Node->getOperand(0); auto Idx = Node->getConstantOperandVal(1); MVT InVT = V.getSimpleValueType(); diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h index 6dfaee0bcf8d4..1d120c13442d5 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h @@ -153,11 +153,11 @@ class RISCVDAGToDAGISel : public SelectionDAGISel { SmallVectorImpl &Operands, bool IsLoad = false, MVT *IndexVT = nullptr); - void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided); - void selectVLSEGFF(SDNode *Node, bool IsMasked); - void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered); - void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided); - void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered); + void selectVLSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsStrided); + void selectVLSEGFF(SDNode *Node, unsigned NF, bool IsMasked); + void selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered); + void selectVSSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsStrided); + void selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered); void selectVSETVLI(SDNode *Node); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index a4445b9be9d91..4a33e8bcbaa2b 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -153,6 +153,18 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32}; static const MVT::SimpleValueType F64VecVTs[] = { MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64}; + static const MVT::SimpleValueType VecTupleVTs[] = { + MVT::riscv_nxv1i8x2, MVT::riscv_nxv1i8x3, MVT::riscv_nxv1i8x4, + MVT::riscv_nxv1i8x5, MVT::riscv_nxv1i8x6, MVT::riscv_nxv1i8x7, + MVT::riscv_nxv1i8x8, MVT::riscv_nxv2i8x2, MVT::riscv_nxv2i8x3, + MVT::riscv_nxv2i8x4, 
MVT::riscv_nxv2i8x5, MVT::riscv_nxv2i8x6, + MVT::riscv_nxv2i8x7, MVT::riscv_nxv2i8x8, MVT::riscv_nxv4i8x2, + MVT::riscv_nxv4i8x3, MVT::riscv_nxv4i8x4, MVT::riscv_nxv4i8x5, + MVT::riscv_nxv4i8x6, MVT::riscv_nxv4i8x7, MVT::riscv_nxv4i8x8, + MVT::riscv_nxv8i8x2, MVT::riscv_nxv8i8x3, MVT::riscv_nxv8i8x4, + MVT::riscv_nxv8i8x5, MVT::riscv_nxv8i8x6, MVT::riscv_nxv8i8x7, + MVT::riscv_nxv8i8x8, MVT::riscv_nxv16i8x2, MVT::riscv_nxv16i8x3, + MVT::riscv_nxv16i8x4, MVT::riscv_nxv32i8x2}; if (Subtarget.hasVInstructions()) { auto addRegClassForRVV = [this](MVT VT) { @@ -218,6 +230,39 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, if (useRVVForFixedLengthVectorVT(VT)) addRegClassForFixedVectors(VT); } + + addRegisterClass(MVT::riscv_nxv1i8x2, &RISCV::VRN2M1RegClass); + addRegisterClass(MVT::riscv_nxv1i8x3, &RISCV::VRN3M1RegClass); + addRegisterClass(MVT::riscv_nxv1i8x4, &RISCV::VRN4M1RegClass); + addRegisterClass(MVT::riscv_nxv1i8x5, &RISCV::VRN5M1RegClass); + addRegisterClass(MVT::riscv_nxv1i8x6, &RISCV::VRN6M1RegClass); + addRegisterClass(MVT::riscv_nxv1i8x7, &RISCV::VRN7M1RegClass); + addRegisterClass(MVT::riscv_nxv1i8x8, &RISCV::VRN8M1RegClass); + addRegisterClass(MVT::riscv_nxv2i8x2, &RISCV::VRN2M1RegClass); + addRegisterClass(MVT::riscv_nxv2i8x3, &RISCV::VRN3M1RegClass); + addRegisterClass(MVT::riscv_nxv2i8x4, &RISCV::VRN4M1RegClass); + addRegisterClass(MVT::riscv_nxv2i8x5, &RISCV::VRN5M1RegClass); + addRegisterClass(MVT::riscv_nxv2i8x6, &RISCV::VRN6M1RegClass); + addRegisterClass(MVT::riscv_nxv2i8x7, &RISCV::VRN7M1RegClass); + addRegisterClass(MVT::riscv_nxv2i8x8, &RISCV::VRN8M1RegClass); + addRegisterClass(MVT::riscv_nxv4i8x2, &RISCV::VRN2M1RegClass); + addRegisterClass(MVT::riscv_nxv4i8x3, &RISCV::VRN3M1RegClass); + addRegisterClass(MVT::riscv_nxv4i8x4, &RISCV::VRN4M1RegClass); + addRegisterClass(MVT::riscv_nxv4i8x5, &RISCV::VRN5M1RegClass); + addRegisterClass(MVT::riscv_nxv4i8x6, &RISCV::VRN6M1RegClass); + addRegisterClass(MVT::riscv_nxv4i8x7, &RISCV::VRN7M1RegClass); + addRegisterClass(MVT::riscv_nxv4i8x8, &RISCV::VRN8M1RegClass); + addRegisterClass(MVT::riscv_nxv8i8x2, &RISCV::VRN2M1RegClass); + addRegisterClass(MVT::riscv_nxv8i8x3, &RISCV::VRN3M1RegClass); + addRegisterClass(MVT::riscv_nxv8i8x4, &RISCV::VRN4M1RegClass); + addRegisterClass(MVT::riscv_nxv8i8x5, &RISCV::VRN5M1RegClass); + addRegisterClass(MVT::riscv_nxv8i8x6, &RISCV::VRN6M1RegClass); + addRegisterClass(MVT::riscv_nxv8i8x7, &RISCV::VRN7M1RegClass); + addRegisterClass(MVT::riscv_nxv8i8x8, &RISCV::VRN8M1RegClass); + addRegisterClass(MVT::riscv_nxv16i8x2, &RISCV::VRN2M2RegClass); + addRegisterClass(MVT::riscv_nxv16i8x3, &RISCV::VRN3M2RegClass); + addRegisterClass(MVT::riscv_nxv16i8x4, &RISCV::VRN4M2RegClass); + addRegisterClass(MVT::riscv_nxv32i8x2, &RISCV::VRN2M4RegClass); } // Compute derived properties from the register classes. @@ -866,6 +911,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, } } + for (MVT VT : VecTupleVTs) { + if (!isTypeLegal(VT)) + continue; + + setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom); + } + // Expand various CCs to best match the RVV ISA, which natively supports UNE // but no other unordered comparisons, and supports all ordered comparisons // except ONE. 
Additionally, we expand GT,OGT,GE,OGE for optimization @@ -1538,7 +1590,10 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, MemTy = MemTy->getScalarType(); Info.memVT = getValueType(DL, MemTy); - Info.align = Align(DL.getTypeSizeInBits(MemTy->getScalarType()) / 8); + if (MemTy->isTargetExtTy()) + Info.align = DL.getABITypeAlign(MemTy); + else + Info.align = Align(DL.getTypeSizeInBits(MemTy->getScalarType()) / 8); Info.size = MemoryLocation::UnknownSize; Info.flags |= IsStore ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad; @@ -1635,7 +1690,7 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, case Intrinsic::riscv_vlseg6ff: case Intrinsic::riscv_vlseg7ff: case Intrinsic::riscv_vlseg8ff: - return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2, + return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3, /*IsStore*/ false, /*IsUnitStrided*/ false, /*UsePtrVal*/ true); case Intrinsic::riscv_vlseg2_mask: @@ -1652,7 +1707,7 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, case Intrinsic::riscv_vlseg6ff_mask: case Intrinsic::riscv_vlseg7ff_mask: case Intrinsic::riscv_vlseg8ff_mask: - return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4, + return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 5, /*IsStore*/ false, /*IsUnitStrided*/ false, /*UsePtrVal*/ true); case Intrinsic::riscv_vlsseg2: @@ -1676,7 +1731,7 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, case Intrinsic::riscv_vluxseg6: case Intrinsic::riscv_vluxseg7: case Intrinsic::riscv_vluxseg8: - return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3, + return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4, /*IsStore*/ false, /*IsUnitStrided*/ false); case Intrinsic::riscv_vlsseg2_mask: @@ -1700,7 +1755,7 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, case Intrinsic::riscv_vluxseg6_mask: case Intrinsic::riscv_vluxseg7_mask: case Intrinsic::riscv_vluxseg8_mask: - return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 5, + return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 6, /*IsStore*/ false, /*IsUnitStrided*/ false); case Intrinsic::riscv_vsseg2: @@ -1710,7 +1765,7 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, case Intrinsic::riscv_vsseg6: case Intrinsic::riscv_vsseg7: case Intrinsic::riscv_vsseg8: - return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2, + return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3, /*IsStore*/ true, /*IsUnitStrided*/ false); case Intrinsic::riscv_vsseg2_mask: @@ -1720,7 +1775,7 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, case Intrinsic::riscv_vsseg6_mask: case Intrinsic::riscv_vsseg7_mask: case Intrinsic::riscv_vsseg8_mask: - return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3, + return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4, /*IsStore*/ true, /*IsUnitStrided*/ false); case Intrinsic::riscv_vssseg2: @@ -1744,7 +1799,7 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, case Intrinsic::riscv_vsuxseg6: case Intrinsic::riscv_vsuxseg7: case Intrinsic::riscv_vsuxseg8: - return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3, + return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4, /*IsStore*/ true, /*IsUnitStrided*/ false); case Intrinsic::riscv_vssseg2_mask: @@ -1768,7 +1823,7 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, case Intrinsic::riscv_vsuxseg6_mask: case Intrinsic::riscv_vsuxseg7_mask: case Intrinsic::riscv_vsuxseg8_mask: - return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4, + return 
SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 5, /*IsStore*/ true, /*IsUnitStrided*/ false); } @@ -2383,6 +2438,27 @@ static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS, } RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) { + if (VT.isRISCVVectorTuple()) { + if (VT.SimpleTy >= MVT::riscv_nxv1i8x2 && + VT.SimpleTy <= MVT::riscv_nxv1i8x8) + return RISCVII::LMUL_F8; + if (VT.SimpleTy >= MVT::riscv_nxv2i8x2 && + VT.SimpleTy <= MVT::riscv_nxv2i8x8) + return RISCVII::LMUL_F4; + if (VT.SimpleTy >= MVT::riscv_nxv4i8x2 && + VT.SimpleTy <= MVT::riscv_nxv4i8x8) + return RISCVII::LMUL_F2; + if (VT.SimpleTy >= MVT::riscv_nxv8i8x2 && + VT.SimpleTy <= MVT::riscv_nxv8i8x8) + return RISCVII::LMUL_1; + if (VT.SimpleTy >= MVT::riscv_nxv16i8x2 && + VT.SimpleTy <= MVT::riscv_nxv16i8x4) + return RISCVII::LMUL_2; + if (VT.SimpleTy == MVT::riscv_nxv32i8x2) + return RISCVII::LMUL_4; + llvm_unreachable("Invalid vector tuple type LMUL."); + } + assert(VT.isScalableVector() && "Expecting a scalable vector type"); unsigned KnownSize = VT.getSizeInBits().getKnownMinValue(); if (VT.getVectorElementType() == MVT::i1) @@ -2450,6 +2526,44 @@ unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) { } unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) { + if (VT.isRISCVVectorTuple()) { + unsigned NF = VT.getRISCVVectorTupleNumFields(); + unsigned RegsPerField = std::max(1U, (unsigned)VT.getSizeInBits() / + (NF * RISCV::RVVBitsPerBlock)); + switch (RegsPerField) { + case 1: + if (NF == 2) + return RISCV::VRN2M1RegClassID; + if (NF == 3) + return RISCV::VRN3M1RegClassID; + if (NF == 4) + return RISCV::VRN4M1RegClassID; + if (NF == 5) + return RISCV::VRN5M1RegClassID; + if (NF == 6) + return RISCV::VRN6M1RegClassID; + if (NF == 7) + return RISCV::VRN7M1RegClassID; + if (NF == 8) + return RISCV::VRN8M1RegClassID; + break; + case 2: + if (NF == 2) + return RISCV::VRN2M2RegClassID; + if (NF == 3) + return RISCV::VRN3M2RegClassID; + if (NF == 4) + return RISCV::VRN4M2RegClassID; + break; + case 4: + assert(NF == 2); + return RISCV::VRN2M4RegClassID; + default: + break; + } + llvm_unreachable("Invalid vector tuple type RegClass."); + } + if (VT.getVectorElementType() == MVT::i1) return RISCV::VRRegClassID; return getRegClassIDForLMUL(getLMUL(VT)); @@ -2470,6 +2584,21 @@ RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs( "Register classes not ordered"); unsigned VecRegClassID = getRegClassIDForVecVT(VecVT); unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT); + + // If VecVT is a vector tuple type, either it's the tuple type with same + // RegClass with SubVecVT or SubVecVT is a actually a subvector of the VecVT. + if (VecVT.isRISCVVectorTuple()) { + if (VecRegClassID == SubRegClassID) + return {RISCV::NoSubRegister, 0}; + + assert(SubVecVT.isScalableVector() && + "Only allow scalable vector subvector."); + assert(getLMUL(VecVT) == getLMUL(SubVecVT) && + "Invalid vector tuple insert/extract for vector and subvector with " + "different LMUL."); + return {getSubregIndexByMVT(VecVT, InsertExtractIdx), 0}; + } + // Try to compose a subregister index that takes us from the incoming // LMUL>1 register class down to the outgoing one. At each step we half // the LMUL: @@ -6774,18 +6903,92 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op, } return Vec; } - case ISD::LOAD: + case ISD::LOAD: { + auto *Load = cast(Op); + EVT VecTy = Load->getMemoryVT(); + // Handle normal vector tuple load. 
+ if (VecTy.isRISCVVectorTuple()) { + SDLoc DL(Op); + MVT XLenVT = Subtarget.getXLenVT(); + unsigned NF = VecTy.getRISCVVectorTupleNumFields(); + unsigned Sz = VecTy.getSizeInBits(); + unsigned NumElts = Sz / (NF * 8); + int Log2LMUL = Log2_64(NumElts) - 3; + + auto Flag = SDNodeFlags(); + Flag.setNoUnsignedWrap(true); + SDValue Ret = DAG.getUNDEF(VecTy); + SDValue BasePtr = Load->getBasePtr(); + SDValue VROffset = DAG.getNode(RISCVISD::READ_VLENB, DL, XLenVT); + VROffset = + DAG.getNode(ISD::SHL, DL, XLenVT, VROffset, + DAG.getConstant(std::max(Log2LMUL, 0), DL, XLenVT)); + SmallVector OutChains; + + // Load NF vector registers and combine them to a vector tuple. + for (unsigned i = 0; i < NF; ++i) { + SDValue LoadVal = DAG.getLoad( + MVT::getScalableVectorVT(MVT::i8, NumElts), DL, Load->getChain(), + BasePtr, MachinePointerInfo(Load->getAddressSpace()), Align(8)); + OutChains.push_back(LoadVal.getValue(1)); + Ret = DAG.getNode(RISCVISD::TUPLE_INSERT, DL, VecTy, Ret, LoadVal, + DAG.getVectorIdxConstant(i, DL)); + BasePtr = DAG.getNode(ISD::ADD, DL, XLenVT, BasePtr, VROffset, Flag); + } + return DAG.getMergeValues( + {Ret, DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains)}, DL); + } + if (auto V = expandUnalignedRVVLoad(Op, DAG)) return V; if (Op.getValueType().isFixedLengthVector()) return lowerFixedLengthVectorLoadToRVV(Op, DAG); return Op; - case ISD::STORE: + } + case ISD::STORE: { + auto *Store = cast(Op); + SDValue StoredVal = Store->getValue(); + EVT VecTy = StoredVal.getValueType(); + // Handle normal vector tuple store. + if (VecTy.isRISCVVectorTuple()) { + SDLoc DL(Op); + MVT XLenVT = Subtarget.getXLenVT(); + unsigned NF = VecTy.getRISCVVectorTupleNumFields(); + unsigned Sz = VecTy.getSizeInBits(); + unsigned NumElts = Sz / (NF * 8); + int Log2LMUL = Log2_64(NumElts) - 3; + + auto Flag = SDNodeFlags(); + Flag.setNoUnsignedWrap(true); + SDValue Ret; + SDValue Chain = Store->getChain(); + SDValue BasePtr = Store->getBasePtr(); + SDValue VROffset = DAG.getNode(RISCVISD::READ_VLENB, DL, XLenVT); + VROffset = + DAG.getNode(ISD::SHL, DL, XLenVT, VROffset, + DAG.getConstant(std::max(Log2LMUL, 0), DL, XLenVT)); + + // Extract subregisters in a vector tuple and store them individually. + for (unsigned i = 0; i < NF; ++i) { + auto Extract = DAG.getNode(RISCVISD::TUPLE_EXTRACT, DL, + MVT::getScalableVectorVT(MVT::i8, NumElts), + StoredVal, DAG.getVectorIdxConstant(i, DL)); + Ret = DAG.getStore(Chain, DL, Extract, BasePtr, + MachinePointerInfo(Store->getAddressSpace()), + Store->getOriginalAlign(), + Store->getMemOperand()->getFlags()); + Chain = Ret.getValue(0); + BasePtr = DAG.getNode(ISD::ADD, DL, XLenVT, BasePtr, VROffset, Flag); + } + return Ret; + } + if (auto V = expandUnalignedRVVStore(Op, DAG)) return V; if (Op.getOperand(1).getValueType().isFixedLengthVector()) return lowerFixedLengthVectorStoreToRVV(Op, DAG); return Op; + } case ISD::MLOAD: case ISD::VP_LOAD: return lowerMaskedLoad(Op, DAG); @@ -8948,6 +9151,21 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, switch (IntNo) { default: break; // Don't custom lower most intrinsics. 
+ case Intrinsic::riscv_tuple_insert: { + SDValue Vec = Op.getOperand(1); + SDValue SubVec = Op.getOperand(2); + SDValue Index = Op.getOperand(3); + + return DAG.getNode(RISCVISD::TUPLE_INSERT, DL, Op.getValueType(), Vec, + SubVec, Index); + } + case Intrinsic::riscv_tuple_extract: { + SDValue Vec = Op.getOperand(1); + SDValue Index = Op.getOperand(2); + + return DAG.getNode(RISCVISD::TUPLE_EXTRACT, DL, Op.getValueType(), Vec, + Index); + } case Intrinsic::thread_pointer: { EVT PtrVT = getPointerTy(DAG.getDataLayout()); return DAG.getRegister(RISCV::X4, PtrVT); @@ -9228,25 +9446,33 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, MVT XLenVT = Subtarget.getXLenVT(); MVT VT = Op->getSimpleValueType(0); MVT ContainerVT = getContainerForFixedLengthVector(VT); + unsigned Sz = NF * ContainerVT.getVectorMinNumElements() * + ContainerVT.getScalarSizeInBits(); + EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF); SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT); auto *Load = cast(Op); - SmallVector ContainerVTs(NF, ContainerVT); - ContainerVTs.push_back(MVT::Other); - SDVTList VTs = DAG.getVTList(ContainerVTs); - SmallVector Ops = {Load->getChain(), IntID}; - Ops.insert(Ops.end(), NF, DAG.getUNDEF(ContainerVT)); - Ops.push_back(Op.getOperand(2)); - Ops.push_back(VL); + + SDVTList VTs = DAG.getVTList({VecTupTy, MVT::Other}); + SDValue Ops[] = { + Load->getChain(), + IntID, + DAG.getUNDEF(VecTupTy), + Op.getOperand(2), + VL, + DAG.getTargetConstant(Log2_64(VT.getScalarSizeInBits()), DL, XLenVT)}; SDValue Result = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, Load->getMemoryVT(), Load->getMemOperand()); SmallVector Results; - for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++) - Results.push_back(convertFromScalableVector(VT, Result.getValue(RetIdx), - DAG, Subtarget)); - Results.push_back(Result.getValue(NF)); + for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++) { + SDValue SubVec = + DAG.getNode(RISCVISD::TUPLE_EXTRACT, DL, ContainerVT, + Result.getValue(0), DAG.getVectorIdxConstant(RetIdx, DL)); + Results.push_back(convertFromScalableVector(VT, SubVec, DAG, Subtarget)); + } + Results.push_back(Result.getValue(1)); return DAG.getMergeValues(Results, DL); } case Intrinsic::riscv_sf_vc_v_x_se: @@ -9307,17 +9533,31 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op, MVT XLenVT = Subtarget.getXLenVT(); MVT VT = Op->getOperand(2).getSimpleValueType(); MVT ContainerVT = getContainerForFixedLengthVector(VT); + unsigned Sz = NF * ContainerVT.getVectorMinNumElements() * + ContainerVT.getScalarSizeInBits(); + EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF); SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); SDValue IntID = DAG.getTargetConstant(VssegInts[NF - 2], DL, XLenVT); SDValue Ptr = Op->getOperand(NF + 2); auto *FixedIntrinsic = cast(Op); - SmallVector Ops = {FixedIntrinsic->getChain(), IntID}; + + SDValue StoredVal = DAG.getUNDEF(VecTupTy); for (unsigned i = 0; i < NF; i++) - Ops.push_back(convertToScalableVector( - ContainerVT, FixedIntrinsic->getOperand(2 + i), DAG, Subtarget)); - Ops.append({Ptr, VL}); + StoredVal = DAG.getNode( + RISCVISD::TUPLE_INSERT, DL, VecTupTy, StoredVal, + convertToScalableVector( + ContainerVT, FixedIntrinsic->getOperand(2 + i), DAG, Subtarget), + DAG.getVectorIdxConstant(i, DL)); + + SDValue Ops[] = { + FixedIntrinsic->getChain(), + IntID, + StoredVal, + Ptr, + VL, + 
DAG.getTargetConstant(Log2_64(VT.getScalarSizeInBits()), DL, XLenVT)}; return DAG.getMemIntrinsicNode( ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Ops, @@ -18595,6 +18835,66 @@ static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2, RISCV::V10M2, RISCV::V12M2, static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4, RISCV::V20M4}; static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8}; +static const MCPhysReg ArgVRN2M1s[] = { + RISCV::V8_V9, RISCV::V9_V10, RISCV::V10_V11, RISCV::V11_V12, + RISCV::V12_V13, RISCV::V13_V14, RISCV::V14_V15, RISCV::V15_V16, + RISCV::V16_V17, RISCV::V17_V18, RISCV::V18_V19, RISCV::V19_V20, + RISCV::V20_V21, RISCV::V21_V22, RISCV::V22_V23}; +static const MCPhysReg ArgVRN3M1s[] = { + RISCV::V8_V9_V10, RISCV::V9_V10_V11, RISCV::V10_V11_V12, + RISCV::V11_V12_V13, RISCV::V12_V13_V14, RISCV::V13_V14_V15, + RISCV::V14_V15_V16, RISCV::V15_V16_V17, RISCV::V16_V17_V18, + RISCV::V17_V18_V19, RISCV::V18_V19_V20, RISCV::V19_V20_V21, + RISCV::V20_V21_V22, RISCV::V21_V22_V23}; +static const MCPhysReg ArgVRN4M1s[] = { + RISCV::V8_V9_V10_V11, RISCV::V9_V10_V11_V12, RISCV::V10_V11_V12_V13, + RISCV::V11_V12_V13_V14, RISCV::V12_V13_V14_V15, RISCV::V13_V14_V15_V16, + RISCV::V14_V15_V16_V17, RISCV::V15_V16_V17_V18, RISCV::V16_V17_V18_V19, + RISCV::V17_V18_V19_V20, RISCV::V18_V19_V20_V21, RISCV::V19_V20_V21_V22, + RISCV::V20_V21_V22_V23}; +static const MCPhysReg ArgVRN5M1s[] = { + RISCV::V8_V9_V10_V11_V12, RISCV::V9_V10_V11_V12_V13, + RISCV::V10_V11_V12_V13_V14, RISCV::V11_V12_V13_V14_V15, + RISCV::V12_V13_V14_V15_V16, RISCV::V13_V14_V15_V16_V17, + RISCV::V14_V15_V16_V17_V18, RISCV::V15_V16_V17_V18_V19, + RISCV::V16_V17_V18_V19_V20, RISCV::V17_V18_V19_V20_V21, + RISCV::V18_V19_V20_V21_V22, RISCV::V19_V20_V21_V22_V23}; +static const MCPhysReg ArgVRN6M1s[] = { + RISCV::V8_V9_V10_V11_V12_V13, RISCV::V9_V10_V11_V12_V13_V14, + RISCV::V10_V11_V12_V13_V14_V15, RISCV::V11_V12_V13_V14_V15_V16, + RISCV::V12_V13_V14_V15_V16_V17, RISCV::V13_V14_V15_V16_V17_V18, + RISCV::V14_V15_V16_V17_V18_V19, RISCV::V15_V16_V17_V18_V19_V20, + RISCV::V16_V17_V18_V19_V20_V21, RISCV::V17_V18_V19_V20_V21_V22, + RISCV::V18_V19_V20_V21_V22_V23}; +static const MCPhysReg ArgVRN7M1s[] = { + RISCV::V8_V9_V10_V11_V12_V13_V14, RISCV::V9_V10_V11_V12_V13_V14_V15, + RISCV::V10_V11_V12_V13_V14_V15_V16, RISCV::V11_V12_V13_V14_V15_V16_V17, + RISCV::V12_V13_V14_V15_V16_V17_V18, RISCV::V13_V14_V15_V16_V17_V18_V19, + RISCV::V14_V15_V16_V17_V18_V19_V20, RISCV::V15_V16_V17_V18_V19_V20_V21, + RISCV::V16_V17_V18_V19_V20_V21_V22, RISCV::V17_V18_V19_V20_V21_V22_V23}; +static const MCPhysReg ArgVRN8M1s[] = {RISCV::V8_V9_V10_V11_V12_V13_V14_V15, + RISCV::V9_V10_V11_V12_V13_V14_V15_V16, + RISCV::V10_V11_V12_V13_V14_V15_V16_V17, + RISCV::V11_V12_V13_V14_V15_V16_V17_V18, + RISCV::V12_V13_V14_V15_V16_V17_V18_V19, + RISCV::V13_V14_V15_V16_V17_V18_V19_V20, + RISCV::V14_V15_V16_V17_V18_V19_V20_V21, + RISCV::V15_V16_V17_V18_V19_V20_V21_V22, + RISCV::V16_V17_V18_V19_V20_V21_V22_V23}; +static const MCPhysReg ArgVRN2M2s[] = {RISCV::V8M2_V10M2, RISCV::V10M2_V12M2, + RISCV::V12M2_V14M2, RISCV::V14M2_V16M2, + RISCV::V16M2_V18M2, RISCV::V18M2_V20M2, + RISCV::V20M2_V22M2}; +static const MCPhysReg ArgVRN3M2s[] = { + RISCV::V8M2_V10M2_V12M2, RISCV::V10M2_V12M2_V14M2, + RISCV::V12M2_V14M2_V16M2, RISCV::V14M2_V16M2_V18M2, + RISCV::V16M2_V18M2_V20M2, RISCV::V18M2_V20M2_V22M2}; +static const MCPhysReg ArgVRN4M2s[] = { + RISCV::V8M2_V10M2_V12M2_V14M2, RISCV::V10M2_V12M2_V14M2_V16M2, + RISCV::V12M2_V14M2_V16M2_V18M2, 
RISCV::V14M2_V16M2_V18M2_V20M2, + RISCV::V16M2_V18M2_V20M2_V22M2}; +static const MCPhysReg ArgVRN2M4s[] = {RISCV::V8M4_V12M4, RISCV::V12M4_V16M4, + RISCV::V16M4_V20M4}; ArrayRef RISCV::getArgGPRs(const RISCVABI::ABI ABI) { // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except @@ -18694,6 +18994,28 @@ static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo, return State.AllocateReg(ArgVRM4s); if (RC == &RISCV::VRM8RegClass) return State.AllocateReg(ArgVRM8s); + if (RC == &RISCV::VRN2M1RegClass) + return State.AllocateReg(ArgVRN2M1s); + if (RC == &RISCV::VRN3M1RegClass) + return State.AllocateReg(ArgVRN3M1s); + if (RC == &RISCV::VRN4M1RegClass) + return State.AllocateReg(ArgVRN4M1s); + if (RC == &RISCV::VRN5M1RegClass) + return State.AllocateReg(ArgVRN5M1s); + if (RC == &RISCV::VRN6M1RegClass) + return State.AllocateReg(ArgVRN6M1s); + if (RC == &RISCV::VRN7M1RegClass) + return State.AllocateReg(ArgVRN7M1s); + if (RC == &RISCV::VRN8M1RegClass) + return State.AllocateReg(ArgVRN8M1s); + if (RC == &RISCV::VRN2M2RegClass) + return State.AllocateReg(ArgVRN2M2s); + if (RC == &RISCV::VRN3M2RegClass) + return State.AllocateReg(ArgVRN3M2s); + if (RC == &RISCV::VRN4M2RegClass) + return State.AllocateReg(ArgVRN4M2s); + if (RC == &RISCV::VRN2M4RegClass) + return State.AllocateReg(ArgVRN2M4s); llvm_unreachable("Unhandled register class for ValueType"); } @@ -18870,7 +19192,7 @@ bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, Reg = State.AllocateReg(ArgFPR32s); else if (ValVT == MVT::f64 && !UseGPRForF64) Reg = State.AllocateReg(ArgFPR64s); - else if (ValVT.isVector()) { + else if (ValVT.isVector() || ValVT.isRISCVVectorTuple()) { Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI); if (!Reg) { // For return values, the vector must be passed fully via registers or @@ -18921,7 +19243,8 @@ bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, } assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT || - (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) && + (TLI.getSubtarget().hasVInstructions() && + (ValVT.isVector() || ValVT.isRISCVVectorTuple()))) && "Expected an XLenVT or vector types at this stage"); if (Reg) { @@ -20290,6 +20613,8 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { NODE_NAME_CASE(CZERO_EQZ) NODE_NAME_CASE(CZERO_NEZ) NODE_NAME_CASE(SW_GUARDED_BRIND) + NODE_NAME_CASE(TUPLE_INSERT) + NODE_NAME_CASE(TUPLE_EXTRACT) NODE_NAME_CASE(SF_VC_XV_SE) NODE_NAME_CASE(SF_VC_IV_SE) NODE_NAME_CASE(SF_VC_VV_SE) @@ -20380,8 +20705,14 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, break; } } else if (Constraint == "vr") { - for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass, - &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) { + for (const auto *RC : + {&RISCV::VRRegClass, &RISCV::VRM2RegClass, &RISCV::VRM4RegClass, + &RISCV::VRM8RegClass, &RISCV::VRN2M1RegClass, &RISCV::VRN3M1RegClass, + &RISCV::VRN4M1RegClass, &RISCV::VRN5M1RegClass, + &RISCV::VRN6M1RegClass, &RISCV::VRN7M1RegClass, + &RISCV::VRN8M1RegClass, &RISCV::VRN2M2RegClass, + &RISCV::VRN3M2RegClass, &RISCV::VRN4M2RegClass, + &RISCV::VRN2M4RegClass}) { if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) return std::make_pair(0U, RC); } @@ -21221,6 +21552,23 @@ bool RISCVTargetLowering::splitValueIntoRegisterParts( return true; } } + + if (ValueVT.isRISCVVectorTuple() && PartVT.isRISCVVectorTuple()) { + unsigned ValNF = ValueVT.getRISCVVectorTupleNumFields(); + unsigned 
ValLMUL = + divideCeil(ValueVT.getSizeInBits(), ValNF * RISCV::RVVBitsPerBlock); + unsigned PartNF = PartVT.getRISCVVectorTupleNumFields(); + unsigned PartLMUL = + divideCeil(PartVT.getSizeInBits(), PartNF * RISCV::RVVBitsPerBlock); + assert(ValNF == PartNF && ValLMUL == PartLMUL && + "RISC-V vector tuple type only accepts same register class type " + "TUPLE_INSERT"); + + Val = DAG.getNode(RISCVISD::TUPLE_INSERT, DL, PartVT, DAG.getUNDEF(PartVT), + Val, DAG.getVectorIdxConstant(0, DL)); + Parts[0] = Val; + return true; + } return false; } @@ -21498,7 +21846,7 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad( return false; Function *VlsegNFunc; - Value *VL; + Value *VL, *Return; Type *XLenTy = Type::getIntNTy(LI->getContext(), Subtarget.getXLen()); SmallVector Ops; @@ -21507,6 +21855,8 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad( LI->getModule(), FixedVlsegIntrIds[Factor - 2], {ResVTy, LI->getPointerOperandType(), XLenTy}); VL = ConstantInt::get(XLenTy, FVTy->getNumElements()); + Ops.append({LI->getPointerOperand(), VL}); + Return = Builder.CreateCall(VlsegNFunc, Ops); } else { static const Intrinsic::ID IntrIds[] = { Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3, @@ -21514,16 +21864,34 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad( Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7, Intrinsic::riscv_vlseg8}; + unsigned SEW = ResVTy->getElementType()->getScalarSizeInBits(); + unsigned NumElts = ResVTy->getElementCount().getKnownMinValue(); + Type *VecTupTy = TargetExtType::get( + LI->getContext(), "riscv.vector.tuple", + ScalableVectorType::get(Type::getInt8Ty(LI->getContext()), + NumElts * SEW / 8), + 2); + VlsegNFunc = Intrinsic::getDeclaration(LI->getModule(), IntrIds[Factor - 2], - {ResVTy, XLenTy}); + {VecTupTy, XLenTy}); VL = Constant::getAllOnesValue(XLenTy); - Ops.append(Factor, PoisonValue::get(ResVTy)); - } - Ops.append({LI->getPointerOperand(), VL}); + Ops.append({PoisonValue::get(VecTupTy), LI->getPointerOperand(), VL, + ConstantInt::get(XLenTy, Log2_64(SEW))}); + Value *Vlseg = Builder.CreateCall(VlsegNFunc, Ops); - Value *Vlseg = Builder.CreateCall(VlsegNFunc, Ops); - DI->replaceAllUsesWith(Vlseg); + SmallVector AggrTypes{Factor, ResVTy}; + Return = PoisonValue::get(StructType::get(LI->getContext(), AggrTypes)); + Function *VecExtractFunc = Intrinsic::getDeclaration( + LI->getModule(), Intrinsic::riscv_tuple_extract, {ResVTy, VecTupTy}); + for (unsigned i = 0; i < Factor; ++i) { + Value *VecExtract = + Builder.CreateCall(VecExtractFunc, {Vlseg, Builder.getInt32(i)}); + Return = Builder.CreateInsertValue(Return, VecExtract, i); + } + } + + DI->replaceAllUsesWith(Return); return true; } @@ -21557,6 +21925,8 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore( SI->getModule(), FixedVssegIntrIds[Factor - 2], {InVTy, SI->getPointerOperandType(), XLenTy}); VL = ConstantInt::get(XLenTy, FVTy->getNumElements()); + Builder.CreateCall(VssegNFunc, {II->getOperand(0), II->getOperand(1), + SI->getPointerOperand(), VL}); } else { static const Intrinsic::ID IntrIds[] = { Intrinsic::riscv_vsseg2, Intrinsic::riscv_vsseg3, @@ -21564,13 +21934,29 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore( Intrinsic::riscv_vsseg6, Intrinsic::riscv_vsseg7, Intrinsic::riscv_vsseg8}; + unsigned SEW = InVTy->getElementType()->getScalarSizeInBits(); + unsigned NumElts = InVTy->getElementCount().getKnownMinValue(); + Type *VecTupTy = TargetExtType::get( + SI->getContext(), "riscv.vector.tuple", + 
ScalableVectorType::get(Type::getInt8Ty(SI->getContext()), + NumElts * SEW / 8), + 2); + VssegNFunc = Intrinsic::getDeclaration(SI->getModule(), IntrIds[Factor - 2], - {InVTy, XLenTy}); + {VecTupTy, XLenTy}); + VL = Constant::getAllOnesValue(XLenTy); - } - Builder.CreateCall(VssegNFunc, {II->getOperand(0), II->getOperand(1), - SI->getPointerOperand(), VL}); + Function *VecInsertFunc = Intrinsic::getDeclaration( + SI->getModule(), Intrinsic::riscv_tuple_insert, {VecTupTy, InVTy}); + Value *StoredVal = PoisonValue::get(VecTupTy); + for (unsigned i = 0; i < Factor; ++i) + StoredVal = Builder.CreateCall( + VecInsertFunc, {StoredVal, II->getOperand(i), Builder.getInt32(i)}); + + Builder.CreateCall(VssegNFunc, {StoredVal, SI->getPointerOperand(), VL, + ConstantInt::get(XLenTy, Log2_64(SEW))}); + } return true; } diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index 53723c1aa8120..9ae35173ba0cb 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -441,6 +441,10 @@ enum NodeType : unsigned { SF_VC_V_VVW_SE, SF_VC_V_FVW_SE, + // RISC-V vector tuple type version of INSERT_SUBVECTOR/EXTRACT_SUBVECTOR. + TUPLE_INSERT, + TUPLE_EXTRACT, + // FP to 32 bit int conversions for RV64. These are used to keep track of the // result being sign extended to 64 bit. These saturate out of range inputs. STRICT_FCVT_W_RV64 = ISD::FIRST_TARGET_STRICTFP_OPCODE, diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td index 73649129e4f93..4d5c0a7bef941 100644 --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td @@ -616,15 +616,40 @@ def GPRPair : RISCVRegisterClass<[XLenPairFVT], 64, (add // The register class is added for inline assembly for vector mask types. 
def VM : VReg; +defvar VTupM1N2VTs = [riscv_nxv8i8x2, riscv_nxv4i8x2, riscv_nxv2i8x2, riscv_nxv1i8x2]; +defvar VTupM1N3VTs = [riscv_nxv8i8x3, riscv_nxv4i8x3, riscv_nxv2i8x3, riscv_nxv1i8x3]; +defvar VTupM1N4VTs = [riscv_nxv8i8x4, riscv_nxv4i8x4, riscv_nxv2i8x4, riscv_nxv1i8x4]; +defvar VTupM1N5VTs = [riscv_nxv8i8x5, riscv_nxv4i8x5, riscv_nxv2i8x5, riscv_nxv1i8x5]; +defvar VTupM1N6VTs = [riscv_nxv8i8x6, riscv_nxv4i8x6, riscv_nxv2i8x6, riscv_nxv1i8x6]; +defvar VTupM1N7VTs = [riscv_nxv8i8x7, riscv_nxv4i8x7, riscv_nxv2i8x7, riscv_nxv1i8x7]; +defvar VTupM1N8VTs = [riscv_nxv8i8x8, riscv_nxv4i8x8, riscv_nxv2i8x8, riscv_nxv1i8x8]; +defvar VTupM2N2VTs = [riscv_nxv16i8x2]; +defvar VTupM2N3VTs = [riscv_nxv16i8x3]; +defvar VTupM2N4VTs = [riscv_nxv16i8x4]; +defvar VTupM4N2VTs = [riscv_nxv32i8x2]; +class VTupRegList { + list L = !cond(!and(!eq(LMUL, 1), !eq(NF, 2)): VTupM1N2VTs, + !and(!eq(LMUL, 1), !eq(NF, 3)): VTupM1N3VTs, + !and(!eq(LMUL, 1), !eq(NF, 4)): VTupM1N4VTs, + !and(!eq(LMUL, 1), !eq(NF, 5)): VTupM1N5VTs, + !and(!eq(LMUL, 1), !eq(NF, 6)): VTupM1N6VTs, + !and(!eq(LMUL, 1), !eq(NF, 7)): VTupM1N7VTs, + !and(!eq(LMUL, 1), !eq(NF, 8)): VTupM1N8VTs, + !and(!eq(LMUL, 2), !eq(NF, 2)): VTupM2N2VTs, + !and(!eq(LMUL, 2), !eq(NF, 3)): VTupM2N3VTs, + !and(!eq(LMUL, 2), !eq(NF, 4)): VTupM2N4VTs, + !and(!eq(LMUL, 4), !eq(NF, 2)): VTupM4N2VTs); +} + foreach m = LMULList in { foreach nf = NFList.L in { let NF = nf in { def "VRN" # nf # "M" # m # "NoV0" - : VReg<[untyped], + : VReg.L, (add !cast("VN" # nf # "M" # m # "NoV0")), m>; def "VRN" # nf # "M" # m - : VReg<[untyped], + : VReg.L, (add !cast("VN" # nf # "M" # m # "NoV0"), !cast("VN" # nf # "M" # m # "V0")), m>; diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll index a8e99ddf32d63..899aad6ed7232 100644 --- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll +++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll @@ -55,32 +55,33 @@ define void @_Z3foov() { ; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_40) ; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_44) -; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_44) -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vl2r.v v10, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: add a1, a1, a2 -; CHECK-NEXT: vl2r.v v12, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: add a1, a1, a2 -; CHECK-NEXT: vl2r.v v14, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: add a1, a1, a2 -; CHECK-NEXT: vl2r.v v16, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: lui a0, 1048572 ; CHECK-NEXT: addi a0, a0, 928 ; CHECK-NEXT: vmsbc.vx v0, v8, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 1 +; CHECK-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2r.v v12, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2r.v v14, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl1r.v v14, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vsetvli zero, zero, e16, 
m2, tu, mu -; CHECK-NEXT: vsext.vf2 v10, v8, v0.t +; CHECK-NEXT: vsext.vf2 v8, v14, v0.t +; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_44) +; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_44) +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; CHECK-NEXT: vle16.v v14, (a0) ; CHECK-NEXT: lui a0, %hi(var_47) ; CHECK-NEXT: addi a0, a0, %lo(var_47) -; CHECK-NEXT: vsseg4e16.v v10, (a0) +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 10 ; CHECK-NEXT: mul a0, a0, a1 @@ -100,7 +101,11 @@ entry: %8 = tail call @llvm.riscv.vmsbc.nxv8i16.i16.i64( %6, i16 -15456, i64 2) %9 = tail call i64 @llvm.riscv.vsetvli.i64(i64 2, i64 1, i64 1) %10 = tail call @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64( %0, %1, %8, i64 2, i64 0) - tail call void @llvm.riscv.vsseg4.nxv8i16.i64( %10, %2, %3, %4, ptr nonnull @var_47, i64 2) + %v_0 = call target("riscv.vector.tuple", , 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) poison, %10, i32 0) + %v_1 = call target("riscv.vector.tuple", , 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %v_0, %2, i32 1) + %v_2 = call target("riscv.vector.tuple", , 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %v_1, %3, i32 2) + %v_3 = call target("riscv.vector.tuple", , 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %v_2, %4, i32 3) + tail call void @llvm.riscv.vsseg4.nxv8i16.i64(target("riscv.vector.tuple", , 4) %v_3, ptr nonnull @var_47, i64 2, i64 4) ret void } @@ -114,4 +119,6 @@ declare @llvm.riscv.vmsbc.nxv8i16.i16.i64(, declare @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(, , , i64, i64 immarg) -declare void @llvm.riscv.vsseg4.nxv8i16.i64(, , , , ptr nocapture, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), , i32) + +declare void @llvm.riscv.vsseg4.nxv8i16.i64(target("riscv.vector.tuple", , 4), ptr nocapture, i64, i64) diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll index 9971cb7821ad1..0f3fdf08696d6 100644 --- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll +++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll @@ -123,8 +123,8 @@ define void @last_chance_recoloring_failure() { ; SUBREGLIVENESS-NEXT: addi sp, sp, 32 ; SUBREGLIVENESS-NEXT: ret entry: - %i = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64( undef, undef, ptr nonnull poison, poison, i64 55) - %i1 = extractvalue { , } %i, 0 + %i = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(target("riscv.vector.tuple", , 2) undef, ptr nonnull poison, poison, i64 55, i64 4) + %i1 = tail call @llvm.riscv.tuple.extract.v16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %i, i32 0) %i2 = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64( poison, poison, poison, zeroinitializer, i64 7, i64 36, i64 0) call void @func() %i3 = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( poison, poison, poison, poison, i64 32, i64 0) @@ -136,7 +136,8 @@ entry: } declare void @func() -declare { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64( , , ptr nocapture, , i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(target("riscv.vector.tuple", , 2), ptr nocapture, 
, i64, i64) +declare @llvm.riscv.tuple.extract.v16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), i32) declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64(, , , , i64, i64, i64 immarg) declare @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(, , , , i64, i64 immarg) declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64(, , , i64, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-vector-tuple.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-vector-tuple.ll new file mode 100644 index 0000000000000..82a3ac4a74d17 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-vector-tuple.ll @@ -0,0 +1,112 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s + +target triple = "riscv64-unknown-unknown-elf" + +define target("riscv.vector.tuple", , 5) @load_store_m1x5(target("riscv.vector.tuple", , 5) %tuple) { +; CHECK-LABEL: load_store_m1x5: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrrs a0, vlenb, zero +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs1r.v v8, (a0) +; CHECK-NEXT: csrrs a1, vlenb, zero +; CHECK-NEXT: add a2, a0, a1 +; CHECK-NEXT: vs1r.v v9, (a2) +; CHECK-NEXT: add a3, a2, a1 +; CHECK-NEXT: vs1r.v v10, (a3) +; CHECK-NEXT: add a4, a3, a1 +; CHECK-NEXT: vs1r.v v11, (a4) +; CHECK-NEXT: add a1, a4, a1 +; CHECK-NEXT: vs1r.v v12, (a1) +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vl1re8.v v8, (a0) +; CHECK-NEXT: vl1re8.v v9, (a2) +; CHECK-NEXT: vl1re8.v v10, (a3) +; CHECK-NEXT: vl1re8.v v11, (a4) +; CHECK-NEXT: vl1re8.v v12, (a1) +; CHECK-NEXT: csrrs a0, vlenb, zero +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %tuple.addr = alloca target("riscv.vector.tuple", , 5), align 1 + store target("riscv.vector.tuple", , 5) %tuple, ptr %tuple.addr, align 1 + call void asm sideeffect "", + "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() + %0 = load target("riscv.vector.tuple", , 5), ptr %tuple.addr, align 1 + ret target("riscv.vector.tuple", , 5) %0 +} + +define target("riscv.vector.tuple", , 2) @load_store_m2x2(target("riscv.vector.tuple", , 2) %tuple) { +; CHECK-LABEL: load_store_m2x2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrrs a0, vlenb, zero +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs2r.v v8, (a0) +; CHECK-NEXT: csrrs a1, vlenb, zero +; CHECK-NEXT: slli a1, a1, 1 +; CHECK-NEXT: add a1, a0, a1 +; CHECK-NEXT: vs2r.v v10, (a1) +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vl2re8.v v8, (a0) +; CHECK-NEXT: vl2re8.v v10, (a1) +; CHECK-NEXT: csrrs a0, vlenb, zero +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 
0(ra) +entry: + %tuple.addr = alloca target("riscv.vector.tuple", , 2), align 1 + store target("riscv.vector.tuple", , 2) %tuple, ptr %tuple.addr, align 1 + call void asm sideeffect "", + "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() + %0 = load target("riscv.vector.tuple", , 2), ptr %tuple.addr, align 1 + ret target("riscv.vector.tuple", , 2) %0 +} + +define target("riscv.vector.tuple", , 2) @load_store_m4x2(target("riscv.vector.tuple", , 2) %tuple) { +; CHECK-LABEL: load_store_m4x2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrrs a0, vlenb, zero +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs4r.v v8, (a0) +; CHECK-NEXT: csrrs a1, vlenb, zero +; CHECK-NEXT: slli a1, a1, 2 +; CHECK-NEXT: add a1, a0, a1 +; CHECK-NEXT: vs4r.v v12, (a1) +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vl4re8.v v8, (a0) +; CHECK-NEXT: vl4re8.v v12, (a1) +; CHECK-NEXT: csrrs a0, vlenb, zero +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %tuple.addr = alloca target("riscv.vector.tuple", , 2), align 1 + store target("riscv.vector.tuple", , 2) %tuple, ptr %tuple.addr, align 1 + call void asm sideeffect "", + "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() + %0 = load target("riscv.vector.tuple", , 2), ptr %tuple.addr, align 1 + ret target("riscv.vector.tuple", , 2) %0 +} diff --git a/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll b/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll index 6dbebb656b66b..967a58b45a599 100644 --- a/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll @@ -420,3 +420,61 @@ entry: %0 = tail call asm "vmand.mm $0, $1, $2", "={v0},{v1},{v2}"( %in, %in2) ret %0 } + +define void @test_vector_tuple_type0(target("riscv.vector.tuple", , 3) %val, ptr %base) nounwind { +; CHECK-LABEL: test_vector_tuple_type0: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: ret +entry: + tail call void asm "vsseg3e8.v $0, ($1)", "^vr,r"(target("riscv.vector.tuple", , 3) %val, ptr %base) + ret void +} + +define void @test_vector_tuple_type1(target("riscv.vector.tuple", , 3) %val, ptr %base) nounwind { +; CHECK-LABEL: test_vector_tuple_type1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: ret +entry: + tail call void asm "vsseg3e8.v $0, ($1)", "^vr,r"(target("riscv.vector.tuple", , 3) %val, ptr %base) + ret void +} + +define void @test_vector_tuple_type2(target("riscv.vector.tuple", , 4) %val, target("riscv.vector.tuple", , 7) %val2, target("riscv.vector.tuple", , 7) %val3, ptr %base) nounwind { +; CHECK-LABEL: test_vector_tuple_type2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl1r.v v23, (a0) +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: add a0, a0, a2 +; CHECK-NEXT: vl1r.v v24, (a0) +; CHECK-NEXT: add a0, a0, a2 +; CHECK-NEXT: vl1r.v v25, (a0) +; 
CHECK-NEXT: add a0, a0, a2 +; CHECK-NEXT: vl1r.v v26, (a0) +; CHECK-NEXT: add a0, a0, a2 +; CHECK-NEXT: vl1r.v v27, (a0) +; CHECK-NEXT: add a0, a0, a2 +; CHECK-NEXT: vl1r.v v28, (a0) +; CHECK-NEXT: add a0, a0, a2 +; CHECK-NEXT: vl1r.v v29, (a0) +; CHECK-NEXT: #APP +; CHECK-NEXT: vsseg3e8.v v8, (a1) +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: #APP +; CHECK-NEXT: vsseg7e8.v v16, (a1) +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: #APP +; CHECK-NEXT: vsseg7e8.v v23, (a1) +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: ret +entry: + tail call void asm "vsseg3e8.v $0, ($1)", "^vr,r"(target("riscv.vector.tuple", , 4) %val, ptr %base) + tail call void asm "vsseg7e8.v $0, ($1)", "^vr,r"(target("riscv.vector.tuple", , 7) %val2, ptr %base) + tail call void asm "vsseg7e8.v $0, ($1)", "^vr,r"(target("riscv.vector.tuple", , 7) %val3, ptr %base) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll b/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll index b6f9d319fe57d..06bce82efb313 100644 --- a/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll +++ b/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll @@ -4,16 +4,15 @@ ; This test previously crashed with an error "ran out of registers during register allocation" -declare void @llvm.riscv.vsseg2.mask.nxv16i16(,, ptr, , i32) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -define void @test_vsseg2_mask_nxv16i16( %val, ptr %base, %mask, i32 %vl) { +define void @test_vsseg2_mask_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl) { ; CHECK-LABEL: test_vsseg2_mask_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv16i16( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll index 9f4718db1dfcc..c12df627b49d6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll @@ -76,10 +76,10 @@ define @spill_zvlsseg_nxv1i32(ptr %base, i32 %vl) nounwind { ; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16 ; SPILL-O2-VLEN128-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32( undef, undef, ptr %base, i32 %vl) + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - %1 = extractvalue {,} %0, 1 + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } @@ -153,10 +153,10 @@ define @spill_zvlsseg_nxv2i32(ptr %base, i32 %vl) nounwind { ; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16 ; SPILL-O2-VLEN128-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32( undef, undef, ptr %base, i32 %vl) + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) call void 
asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - %1 = extractvalue {,} %0, 1 + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } @@ -233,10 +233,10 @@ define @spill_zvlsseg_nxv4i32(ptr %base, i32 %vl) nounwind { ; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16 ; SPILL-O2-VLEN128-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32( undef, undef, ptr %base, i32 %vl) + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - %1 = extractvalue {,} %0, 1 + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } @@ -313,10 +313,10 @@ define @spill_zvlsseg_nxv8i32(ptr %base, i32 %vl) nounwind { ; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16 ; SPILL-O2-VLEN128-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32( undef, undef, ptr %base, i32 %vl) + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - %1 = extractvalue {,} %0, 1 + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } @@ -403,15 +403,15 @@ define @spill_zvlsseg3_nxv4i32(ptr %base, i32 %vl) nounwind { ; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16 ; SPILL-O2-VLEN128-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32( undef, undef, undef, ptr %base, i32 %vl) + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 5) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - %1 = extractvalue {,,} %0, 1 + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1i32(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.nxv2i32(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.nxv4i32(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.nxv8i32(,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.nxv4i32(,,, ptr , i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), 
ptr , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr , i32, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll index 4ea9fab6a5a08..afb4b1560728c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll @@ -76,10 +76,10 @@ define @spill_zvlsseg_nxv1i32(ptr %base, i64 %vl) nounwind { ; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16 ; SPILL-O2-VLEN128-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32( undef, undef, ptr %base, i64 %vl) + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - %1 = extractvalue {,} %0, 1 + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } @@ -153,10 +153,10 @@ define @spill_zvlsseg_nxv2i32(ptr %base, i64 %vl) nounwind { ; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16 ; SPILL-O2-VLEN128-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32( undef, undef, ptr %base, i64 %vl) + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - %1 = extractvalue {,} %0, 1 + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } @@ -233,10 +233,10 @@ define @spill_zvlsseg_nxv4i32(ptr %base, i64 %vl) nounwind { ; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16 ; SPILL-O2-VLEN128-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32( undef, undef, ptr %base, i64 %vl) + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - %1 = extractvalue {,} %0, 1 + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } @@ -313,10 +313,10 @@ define @spill_zvlsseg_nxv8i32(ptr %base, i64 %vl) nounwind { ; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16 ; SPILL-O2-VLEN128-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32( undef, undef, ptr %base, i64 %vl) + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) call void asm sideeffect "", 
"~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - %1 = extractvalue {,} %0, 1 + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } @@ -403,15 +403,15 @@ define @spill_zvlsseg3_nxv4i32(ptr %base, i64 %vl) nounwind { ; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16 ; SPILL-O2-VLEN128-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32( undef, undef, undef, ptr %base, i64 %vl) + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 5) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - %1 = extractvalue {,,} %0, 1 + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1i32(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.nxv2i32(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.nxv4i32(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.nxv8i32(,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.nxv4i32(,,, ptr , i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr , i64, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll index d0cd4b12948c0..efc17b70923aa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll @@ -9,17 +9,18 @@ define {, } @vector_deinterleave_nxv16i1_nxv ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 -; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v0, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmerge.vim v10, v10, 1, v0 -; CHECK-NEXT: vnsrl.wi v12, v8, 0 -; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vnsrl.wi v12, v8, 8 -; CHECK-NEXT: vmsne.vi v8, v12, 0 +; CHECK-NEXT: vmerge.vim v12, v10, 1, v0 +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmerge.vim v14, v10, 1, v0 +; CHECK-NEXT: vnsrl.wi v8, v12, 0 +; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: vnsrl.wi v10, v12, 8 +; CHECK-NEXT: vmsne.vi v8, v10, 0 ; CHECK-NEXT: ret %retval = call {, } @llvm.vector.deinterleave2.nxv32i1( %vec) ret {, } %retval @@ -93,13 +94,15 @@ define {, } @vector_deinterleave_nxv64i1_nxv ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a0, 
vlenb -; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v24, 0 ; CHECK-NEXT: vmerge.vim v16, v24, 1, v0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: vmv1r.v v0, v12 @@ -108,12 +111,18 @@ define {, } @vector_deinterleave_nxv64i1_nxv ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v24, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma -; CHECK-NEXT: vmsne.vi v0, v24, 0 +; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; CHECK-NEXT: vnsrl.wi v0, v16, 8 -; CHECK-NEXT: vnsrl.wi v4, v24, 8 +; CHECK-NEXT: vnsrl.wi v16, v8, 8 +; CHECK-NEXT: vnsrl.wi v20, v24, 8 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma -; CHECK-NEXT: vmsne.vi v8, v24, 0 +; CHECK-NEXT: vmsne.vi v8, v16, 0 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %retval = call {, } @llvm.vector.deinterleave2.nxv128i1( %vec) ret {, } %retval diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll index 578b5dc6a2560..c91f34f010aa2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll @@ -4,8 +4,8 @@ declare { , i64 } @llvm.riscv.vleff.nxv8i8(, ptr, i64) declare { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(, ptr, , i64, i64 immarg) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(, , ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) define i64 @test_vleff_nxv8i8(ptr %p, i64 %vl) { ; CHECK-LABEL: name: test_vleff_nxv8i8 @@ -66,49 +66,47 @@ define i64 @test_vlseg2ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1) + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 16) ; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]] ; CHECK-NEXT: PseudoRET implicit $x10 entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 2 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 ret i64 %1 } -define i64 @test_vlseg2ff_nxv8i8_tu( %val, ptr %base, i64 %vl, ptr %outvl) { +define i64 @test_vlseg2ff_nxv8i8_tu(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_tu ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 + ; CHECK-NEXT: liveins: $v8_v9, $x10, $x11 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1 = REG_SEQUENCE [[COPY2]], %subreg.sub_vrm1_0, [[COPY2]], %subreg.sub_vrm1_1 - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1) + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrn2m1 = COPY $v8_v9 + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 16) ; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]] ; CHECK-NEXT: PseudoRET implicit $x10 entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i8( %val, %val, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 2 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 ret i64 %1 } -define i64 @test_vlseg2ff_nxv8i8_mask( %val, ptr %base, %mask, i64 %vl, ptr %outvl) { +define i64 @test_vlseg2ff_nxv8i8_mask(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, ptr %outvl) { ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_mask ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $v0, $x11 + ; CHECK-NEXT: liveins: $v8_v9, $x10, $v0, $x11 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY3]], %subreg.sub_vrm1_0, [[COPY3]], %subreg.sub_vrm1_1 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrn2m1nov0 = COPY $v8_v9 ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1) + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 16) ; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_MASK1]] ; CHECK-NEXT: PseudoRET implicit $x10 entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, ptr %base, %mask, i64 %vl, i64 0) - %1 = extractvalue {,, i64} %0, 2 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 0, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 ret i64 %1 } diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll index e31155ff3f82a..481505a2095cb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll @@ -1,12722 +1,13856 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh \ +; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv16i16_nxv16i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i16: +define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv16i16_nxv16i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i16: +define @test_vloxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(,, ptr, , , i32, i32) 
+declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv16i16_nxv16i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i8: +define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv16i16_nxv16i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i8: +define @test_vloxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv16i16_nxv16i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i32: +define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, 
v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv16i16_nxv16i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i32: +define @test_vloxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv1i8_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i8: +define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i8: +define @test_vloxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: 
vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv1i8_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i32: +define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i32: +define @test_vloxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv1i8_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i16: +define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i16: +define @test_vloxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv1i8_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i8: +define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i8: +define @test_vloxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv1i8_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i32: +define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i32: +define @test_vloxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; 
CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv1i8_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i16: +define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i16: +define @test_vloxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv1i8_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i8: +define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i8: +define @test_vloxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv1i8_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i32: +define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: 
vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i32: +define @test_vloxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv1i8_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i16: +define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i16: +define 
@test_vloxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv1i8_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i8: +define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i8: +define @test_vloxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8( %val, 
%val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv1i8_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i32: +define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i32: +define @test_vloxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv1i8_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i16: +define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i16: +define @test_vloxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv1i8_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i8: +define @test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; 
CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i8: +define @test_vloxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1.nxv32i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv1i8_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i32: +define @test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i32: +define @test_vloxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1.nxv32i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv1i8_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i16: +define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i16: +define @test_vloxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: 
vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv1i8_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i8: +define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i8: +define @test_vloxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv1i8_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i32: +define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i32: +define @test_vloxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv1i8_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vloxseg7_nxv1i8_nxv1i16: +define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i16: +define @test_vloxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv1i8_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i8: +define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 
= extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i8: +define @test_vloxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv1i8_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i32: +define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i32: +define 
@test_vloxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv1i8_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i16: +define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i16: +define @test_vloxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv16i8_nxv16i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i16: +define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i16: +define @test_vloxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(,, ptr, , , i32, i32) +declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv16i8_nxv16i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i8: +define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i8: +define @test_vloxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv16i8_nxv16i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i32: +define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i32: +define @test_vloxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv16i8_nxv16i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i16: +define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i16: +define @test_vloxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv16i8_nxv16i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i8: +define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i8: +define @test_vloxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(,,, 
ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv16i8_nxv16i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i32: +define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i32: +define @test_vloxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv16i8_nxv16i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i16: +define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 +; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v8 ; 
CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i16: +define @test_vloxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv16i8_nxv16i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i8: +define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vloxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i8: +define @test_vloxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv16i8_nxv16i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i32: +define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i32: +define @test_vloxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv2i32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i32: +define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i32: +define @test_vloxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv2i32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i8: +define 
@test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i8: +define @test_vloxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv2i32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i16: +define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv2i16( %val, ptr 
%base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i16: +define @test_vloxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv2i32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i32: +define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i32: +define @test_vloxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv2i32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i8: +define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i8: +define @test_vloxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv2i32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vloxseg3_nxv2i32_nxv2i16: +define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i16: +define @test_vloxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv2i32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i32: +define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i32: +define @test_vloxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv2i32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i8: +define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i8: +define @test_vloxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, 
mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv2i32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i16: +define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i16: +define @test_vloxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32(,,,,, ptr, , , 
i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv2i32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i32: +define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i32: +define @test_vloxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv2i32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i8: +define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; 
CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i8: +define @test_vloxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv2i32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i16: +define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 
4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i16: +define @test_vloxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv2i32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i32: +define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i32: +define @test_vloxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: 
vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv2i32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i8: +define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i8: +define @test_vloxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, 
i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv2i32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i16: +define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i16: +define @test_vloxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define 
@test_vloxseg7_nxv2i32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i32: +define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8(,,,,,,, ptr, , , i32, i32) - -define @test_vloxseg7_nxv2i32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i8: +define @test_vloxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv2i32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i16: +define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i16: +define @test_vloxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, 
i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv2i32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i32: +define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i32: +define @test_vloxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv2i32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i8: +define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: 
vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i8: +define @test_vloxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv2i32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i16: +define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + 
%1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i16: +define @test_vloxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv4i16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i16: +define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i16: +define @test_vloxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv4i16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i8: +define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i8: +define @test_vloxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv4i16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i32: +define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i32: +define @test_vloxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv4i16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i16: +define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i16: +define @test_vloxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv4i16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i8: +define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i8: +define @test_vloxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: 
vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv4i16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i32: +define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i32: +define @test_vloxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv4i16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i16: +define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i16: +define @test_vloxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv4i16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i8: +define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, 
(a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i8: +define @test_vloxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv4i16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i32: +define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i32: +define 
@test_vloxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv4i16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i16: +define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i16: +define @test_vloxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 
= extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv4i16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i8: +define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i8: +define @test_vloxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv4i16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i32: +define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i32: +define @test_vloxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv4i16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i16: +define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: 
vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i16: +define @test_vloxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv4i16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i8: +define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i8: 
+define @test_vloxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv4i16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i32: +define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i32: +define @test_vloxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail 
call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv4i16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i16: +define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i16: +define @test_vloxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(,,,,,,, ptr, , i32) -declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv4i16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i8: +define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i8: +define @test_vloxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv4i16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i32: +define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i32 %vl) { +; 
CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i32: +define @test_vloxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv4i16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i16: +define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i16: +define @test_vloxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv4i16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i8: +define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i8: +define @test_vloxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv4i16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i32: +define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i32: +define @test_vloxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; 
CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv1i32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i8: +define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i8: +define @test_vloxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) 
+declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv1i32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i32: +define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i32: +define @test_vloxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv1i32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i16: +define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i16: +define @test_vloxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv1i32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i8: +define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i8: +define @test_vloxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v 
v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv1i32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i32: +define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i32: +define @test_vloxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv1i32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i16: +define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i16: +define @test_vloxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv1i32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i8: +define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8( undef, undef, undef, undef, ptr %base, 
%index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i8: +define @test_vloxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv1i32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i32: +define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i32: +define @test_vloxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv1i32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i16: +define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i16: +define @test_vloxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, 
%mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv1i32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i8: +define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i8: +define @test_vloxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv1i32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vloxseg5_nxv1i32_nxv1i32: +define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i32: +define @test_vloxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv1i32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i16: +define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i16: +define @test_vloxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv1i32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i8: +define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i8: +define @test_vloxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, 
v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv1i32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i32: +define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i32: +define @test_vloxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv1i32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i16: +define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i16: +define @test_vloxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv1i32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i8: +define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i8: +define @test_vloxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv1i32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i32: +define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, 
(a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i32: +define @test_vloxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv1i32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i16: +define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define 
@test_vloxseg7_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i16: +define @test_vloxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv1i32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i8: +define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i8: +define @test_vloxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, 
(a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv1i32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i32: +define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i32: +define @test_vloxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1.nxv8i16(target("riscv.vector.tuple", 
, 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv1i32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i16: +define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl, 
%mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i16: +define @test_vloxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv8i16_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i16: +define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i16: +define @test_vloxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv8i16_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i8: +define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i8: +define @test_vloxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv8i16_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i32: +define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i32: +define @test_vloxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv8i16_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i16: +define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i16: +define @test_vloxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv8i16_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i8: +define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i8: +define @test_vloxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; 
CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv8i16_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i32: +define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i32: +define @test_vloxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), 
ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv8i16_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i16: +define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i16: +define @test_vloxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv8i16_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i8: +define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv8i16.nxv8i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i8: +define @test_vloxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv8i16_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i32: +define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i32: +define @test_vloxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv8i8_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i16: +define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i16: +define @test_vloxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} 
@llvm.riscv.vloxseg2.nxv8i8.nxv8i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv8i8_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i8: +define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i8: +define @test_vloxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv8i8_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i32: +define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i32: +define @test_vloxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv8i8_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i16: +define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i16: +define @test_vloxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl, 
%mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv8i8_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i8: +define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i8: +define @test_vloxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv8i8_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i32: +define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i32: +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 
3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv8i8_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i16: +define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i16: +define @test_vloxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) 
undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
<vscale x 8 x i16> @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %0, i32 1)
+  ret <vscale x 8 x i16> %1
+}
+
+declare target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i32>, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32, i32)
+
+define <vscale x 8 x i16> @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vloxseg3ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 4)
+  %1 = call <vscale x 8 x i16> @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %0, i32 1)
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vloxseg3ei32.v v12, (a0), v8, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) undef, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1, i32 4)
+  %1 = call <vscale x 8 x i16> @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %0, i32 1)
+  ret <vscale x 8 x i16> %1
+}
+
+declare target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i8>, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32, i32)
+
+define <vscale x 1 x i16> @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vloxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
+  %1 = call <vscale x 1 x i16> @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vloxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vloxseg4ei8.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1, i32 4)
+  %1 = call <vscale x 1 x i16> @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x i16> %1
+}
+
+declare target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i16>, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32, i32)
+
+define <vscale x 1 x i16> @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vloxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
+  %1 = call <vscale x 1 x i16> @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vloxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vloxseg4ei16.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1, i32 4)
+  %1 = call <vscale x 1 x i16> @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x i16> %1
+}
+
+declare target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i32>, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32, i32)
+
+define <vscale x 1 x i16> @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vloxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
+  %1 = call <vscale x 1 x i16> @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vloxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vloxseg4ei32.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1, i32 4)
+  %1 = call <vscale x 1 x i16> @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x i16> %1
+}
+
+declare target("riscv.vector.tuple", <vscale x 4 x i8>, 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , 
i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 
5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr 
%base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: 
vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, 
ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) 
%0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define 
@test_vloxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; 
CHECK-LABEL: test_vloxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), 
v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) 
undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) 
%0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , 
i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), 
ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define 
@test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, 
i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), 
v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr 
%base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) 
%0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr 
%base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv8i8_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i8: +define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i8: +define @test_vloxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv8i8_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i32: +define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i32: +define @test_vloxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, 
a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv8i8_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i16: +define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i16: +define @test_vloxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv8i8_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i8: +define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i8: +define @test_vloxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv8i8_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i32: +define 
@test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i32: +define @test_vloxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv8i8_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i16: +define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i16: +define @test_vloxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv8i8_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i8: +define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i8: +define @test_vloxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: 
vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv8i8_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i32: +define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i32: +define @test_vloxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv8i8_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i16: +define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i16: +define @test_vloxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv8i8_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i8: +define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i8: +define @test_vloxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv8i8_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i32: +define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i32: +define @test_vloxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv8i8_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i16: +define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i16: +define @test_vloxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv8i8_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i8: +define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i8: +define @test_vloxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { 
+; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv8i8_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i32: +define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i32: +define @test_vloxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, 
ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv8i32_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i16: +define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i16: +define @test_vloxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv8i32_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i8: +define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i8: +define @test_vloxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv8i32_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i32: +define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv8i32.nxv8i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i32: +define @test_vloxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv4i8_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i16: +define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i16: +define @test_vloxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; 
CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv4i8_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i8: +define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i8: +define @test_vloxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg2_nxv4i8_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i32: +define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i32: +define @test_vloxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv4i8_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i16: +define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16( undef, 
undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i16: +define @test_vloxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv4i8_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i8: +define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i8: +define @test_vloxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg3_nxv4i8_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i32: +define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i32: +define @test_vloxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(,,,, ptr, , i32) -declare {,,,} 
@llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv4i8_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i16: +define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i16: +define @test_vloxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv4i8_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i8: +define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i8: +define @test_vloxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vloxseg4_nxv4i8_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i32: +define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret 
%1 } -define @test_vloxseg4_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i32: +define @test_vloxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv4i8_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i16: +define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i16: +define @test_vloxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv4i8_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i8: +define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i8: +define @test_vloxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg5_nxv4i8_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i32: +define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i32: +define @test_vloxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv4i8_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i16: +define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, 
e64, m1, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i16: +define @test_vloxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv4i8_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i8: +define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv4i8( %val, ptr %base, 
%index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i8: +define @test_vloxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg6_nxv4i8_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i32: +define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i32: +define @test_vloxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli 
zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv4i8_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i16: +define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i16: +define @test_vloxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + 
ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv4i8_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i8: +define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i8: +define @test_vloxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg7_nxv4i8_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i32: +define 
@test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i32: +define @test_vloxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv4i8_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i16: +define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - 
ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i16: +define @test_vloxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vloxseg8_nxv4i8_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i8: +define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i8: +define @test_vloxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 
%vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv4i8_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i32: +define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i32: +define @test_vloxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret 
%1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv1i16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i8: +define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i8: +define @test_vloxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv1i16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i32: +define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i32: +define @test_vloxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv1i16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i16: +define @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i16: +define @test_vloxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + 
ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv1i16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i8: +define @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i8: +define @test_vloxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv1i16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i32: +define @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret 
%1 } -define @test_vloxseg3_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i32: +define @test_vloxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv1i16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i16: +define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i16: +define @test_vloxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv1i16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i8: +define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i8: +define @test_vloxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv1i16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i32: +define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i32: +define @test_vloxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv1i16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i16: +define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i16: +define @test_vloxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv1i16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i8: +define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i8: +define @test_vloxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv1i16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i32: +define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i32: +define @test_vloxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv1i16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i16: +define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i16: +define @test_vloxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl, %mask) { +; 
CHECK-LABEL: test_vloxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv1i16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i8: +define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i8: +define @test_vloxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 
1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv1i16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i32: +define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i32: +define @test_vloxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv1i16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i16: +define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, 
%index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i16: +define @test_vloxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv1i16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i8: +define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i8: +define @test_vloxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: 
vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv1i16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i32: +define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i32: +define @test_vloxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv1i16_nxv1i16(ptr %base, 
%index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i16: +define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i16: +define @test_vloxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv1i16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i8: +define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i8: +define @test_vloxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv1i16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i32: +define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i32: +define @test_vloxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv1i16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i16: +define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i16: +define @test_vloxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv32i8_nxv32i16(ptr 
%base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i16: +define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv32i8_nxv32i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv32i16: +define @test_vloxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv32i8_nxv32i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i8: +define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv32i8_nxv32i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv32i8: +define 
@test_vloxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2i8_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i32: +define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i32: +define @test_vloxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2i8_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vloxseg2_nxv2i8_nxv2i8: +define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i8: +define @test_vloxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2i8_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i16: +define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i16: +define @test_vloxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; 
CHECK-LABEL: test_vloxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2i8_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i32: +define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i32: +define @test_vloxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2i8_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i8: +define 
@test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i8: +define @test_vloxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2i8_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i16: +define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i16: +define @test_vloxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv2i8_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i32: +define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i32: +define @test_vloxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8(,,,, ptr, , , i32, i32) -define 
@test_vloxseg4_nxv2i8_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i8: +define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i8: +define @test_vloxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv2i8_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i16: +define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 
%vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i16: +define @test_vloxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv2i8_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i32: +define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i32: +define @test_vloxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, 
%mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv2i8_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i8: +define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i8: +define @test_vloxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv2i8_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i16: +define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue 
{,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i16: +define @test_vloxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv2i8_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i32: +define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i32: +define @test_vloxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv2i8_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i8: +define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i8: +define @test_vloxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv2i8_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i16: 
+define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i16: +define @test_vloxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv2i8_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i32: +define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } 
-define @test_vloxseg7_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i32: +define @test_vloxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv2i8_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i8: +define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i8: +define @test_vloxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, 
%val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv2i8_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i16: +define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i16: +define @test_vloxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv2i8_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i32: +define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i32: +define @test_vloxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv2i8_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i8: +define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i8: 
+define @test_vloxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv2i8_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i16: +define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i16: +define @test_vloxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, 
%index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2i16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i32: +define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i32: +define @test_vloxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2i16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i8: +define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 
= tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i8: +define @test_vloxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2i16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i16: +define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i16: +define @test_vloxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2i16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i32: +define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i32: +define @test_vloxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2i16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i8: +define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i8: +define @test_vloxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2i16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i16: +define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i16: +define @test_vloxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret 
%1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv2i16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i32: +define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i32: +define @test_vloxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv2i16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i8: +define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue 
{,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i8: +define @test_vloxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv2i16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i16: +define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i16: +define @test_vloxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv2i16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i32: +define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i32: +define @test_vloxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv2i16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i8: +define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: 
vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i8: +define @test_vloxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv2i16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i16: +define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i16: +define @test_vloxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv2i16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i32: +define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i32: +define @test_vloxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} 
@llvm.riscv.vloxseg6.nxv2i16.nxv2i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv2i16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i8: +define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i8: +define @test_vloxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv2i16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i16: +define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i16: +define @test_vloxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv2i16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i32: +define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i32: +define @test_vloxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, 
ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv2i16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i8: +define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i8: +define @test_vloxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv2i16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i16: +define 
@test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i16: +define @test_vloxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv2i16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i32: +define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define 
@test_vloxseg8_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i32: +define @test_vloxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv2i16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i8: +define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i8: +define @test_vloxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: 
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv2i16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i16: +define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i16: +define @test_vloxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv4i32_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i16: +define 
@test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i16: +define @test_vloxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv4i32_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i8: +define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i8: +define @test_vloxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv4i32_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i32: +define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i32: +define @test_vloxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv4i32_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i16: +define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, 
%index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i16: +define @test_vloxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv4i32_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i8: +define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i8: +define @test_vloxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv4i32_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i32: +define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i32: +define @test_vloxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16(,,,, ptr, , , i32, i32) -define 
@test_vloxseg4_nxv4i32_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i16: +define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i16: +define @test_vloxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv4i32_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i8: +define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } 
-define @test_vloxseg4_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i8: +define @test_vloxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv4i32_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i32: +define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i32: +define @test_vloxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv16f16_nxv16i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i16: +define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv16i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i16: +define @test_vloxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv16f16_nxv16i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i8: +define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv16i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i8: +define @test_vloxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv16f16_nxv16i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i32: +define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv16i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i32: +define @test_vloxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv4f64_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i16: +define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i16: +define @test_vloxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv4f64_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i8: +define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i8: +define @test_vloxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv4f64_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i32: +define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i32: +define @test_vloxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) 
undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv1f64_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i8: +define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i8: +define @test_vloxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv1f64_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i32: +define @test_vloxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv1f64_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i16: +define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i16: +define @test_vloxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv1f64_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i8: +define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i8: +define @test_vloxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv1f64_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i32: +define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32( undef, undef, undef, ptr %base, %index, i32 %vl) - 
%1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i32: +define @test_vloxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv1f64_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i16: +define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i16: +define @test_vloxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv1f64_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i8: +define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i8: +define @test_vloxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv1f64_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i32: +define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 
+; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i32: +define @test_vloxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv1f64_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i16: +define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i16: +define @test_vloxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli 
zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv1f64_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i8: +define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i8: +define @test_vloxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv1f64_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i32: 
+define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i32: +define @test_vloxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv1f64_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i16: +define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg5_mask_nxv1f64_nxv1i16: +define @test_vloxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv1f64_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i8: +define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i8: +define @test_vloxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv1f64_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i32: +define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i32: +define @test_vloxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv1f64_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i16: +define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, 
ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i16: +define @test_vloxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv1f64_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i8: +define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i8: +define @test_vloxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv1f64_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i32: +define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i32: +define @test_vloxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv1f64_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i16: +define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i16: +define @test_vloxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv1f64_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i8: +define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8( undef, undef , undef , 
undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i8: +define @test_vloxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv1f64_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i32: +define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i32: +define @test_vloxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv1f64_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i16: +define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i16: +define @test_vloxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2f32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i32: +define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i32: +define @test_vloxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2f32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i8: +define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i8: +define @test_vloxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2f32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i16: +define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i16: +define @test_vloxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2f32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i32: +define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i32: +define @test_vloxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2f32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i8: +define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i8: +define @test_vloxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2f32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i16: +define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i16: +define @test_vloxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, 
%mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv2f32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i32: +define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i32: +define @test_vloxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv2f32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i8: +define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i8: +define @test_vloxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv2f32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i16: +define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i16: +define @test_vloxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv2f32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i32: +define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i32: +define @test_vloxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv2f32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i8: +define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i8: +define @test_vloxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv2f32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i16: +define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i16: +define @test_vloxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; 
CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv2f32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i32: +define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vloxseg6_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i32: +define @test_vloxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv2f32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i8: +define 
@test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vloxseg6_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i8: +define @test_vloxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv2f32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i16: +define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vloxseg6_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i16: +define 
@test_vloxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv2f32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i32: +define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i32: +define @test_vloxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv2f32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i8: +define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i8: +define @test_vloxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv2f32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i16: +define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: 
vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i16: +define @test_vloxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv2f32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i32: +define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i32: +define @test_vloxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv2f32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i8: +define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i8: +define @test_vloxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv2f32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i16: +define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i16: +define @test_vloxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv1f16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i8: +define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - 
ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i8: +define @test_vloxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv1f16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i32: +define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i32: +define @test_vloxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv1f16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i16: +define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i16: +define @test_vloxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv1f16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i8: +define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i8: +define @test_vloxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv1f16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i32: +define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i32: +define @test_vloxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv1f16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i16: +define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i16: +define @test_vloxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv1f16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i8: +define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue 
{,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i8: +define @test_vloxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv1f16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i32: +define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i32: +define @test_vloxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv1f16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i16: +define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i16: +define @test_vloxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv1f16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i8: +define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: 
vloxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i8: +define @test_vloxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv1f16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i32: +define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i32: +define @test_vloxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv1f16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i16: +define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i16: +define @test_vloxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv1f16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i8: +define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i8: +define @test_vloxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv1f16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i32: +define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32( undef, undef, 
undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i32: +define @test_vloxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv1f16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i16: +define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i16: +define @test_vloxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 
-; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv1f16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i8: +define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i8: +define @test_vloxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(,,,,,,, ptr, , i32) -declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv1f16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i32: +define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i32: +define @test_vloxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv1f16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i16: +define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i16: +define @test_vloxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv1f16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i8: +define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i8: +define @test_vloxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv1f16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i32: +define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i32: +define @test_vloxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } 
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv1f16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i16: +define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i16: +define @test_vloxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv1f32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i8: +define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret 
%1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i8: +define @test_vloxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv1f32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i32: +define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i32: +define @test_vloxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv1f32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i16: +define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i16: +define @test_vloxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv1f32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i8: +define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i8: +define @test_vloxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv1f32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i32: +define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i32: +define @test_vloxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - 
ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv1f32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i16: +define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i16: +define @test_vloxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv1f32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i8: +define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8( undef, 
undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i8: +define @test_vloxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv1f32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i32: +define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i32: +define @test_vloxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: 
vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv1f32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i16: +define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i16: +define @test_vloxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv1f32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i8: +define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i8: +define @test_vloxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv1f32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i32: +define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i32: +define @test_vloxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv1f32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i16: +define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i16: +define @test_vloxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", 
, 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv1f32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i8: +define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i8: +define @test_vloxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv1f32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i32: +define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i32: +define @test_vloxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv1f32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i16: +define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i16: +define @test_vloxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, 
e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv1f32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i8: +define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i8: +define @test_vloxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv1f32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i32: +define 
@test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i32: +define @test_vloxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv1f32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i16: +define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) 
+ ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i16: +define @test_vloxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv1f32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i8: +define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i8: +define @test_vloxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv1f32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i32: +define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i32: +define @test_vloxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv1f32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i16: +define 
@test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i16: +define @test_vloxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv8f16_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i16: +define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i16: +define @test_vloxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv8f16_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i8: +define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i8: +define @test_vloxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv8f16_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i32: +define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i32: +define @test_vloxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv8f16_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i16: +define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i16: +define @test_vloxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv8f16_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i8: +define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i8: +define @test_vloxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv8f16_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i32: +define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i32: +define @test_vloxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv8f16_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i16: +define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i16: +define @test_vloxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv8f16_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i8: +define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i8: +define @test_vloxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, 
%val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv8f16_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i32: +define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i32: +define @test_vloxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv8f32_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i16: +define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i16: +define @test_vloxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv8f32_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i8: +define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i8: +define @test_vloxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = 
extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv8f32_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i32: +define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i32: +define @test_vloxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2f64_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i32: +define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32( undef, undef, 
ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i32: +define @test_vloxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2f64_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i8: +define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i8: +define @test_vloxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2f64_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i16: +define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i16: +define @test_vloxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2f64_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i32: +define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret 
%1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i32: +define @test_vloxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2f64_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i8: +define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i8: +define @test_vloxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, 
i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2f64_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i16: +define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i16: +define @test_vloxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv2f64_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i32: +define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i32: +define @test_vloxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv2f64_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i8: +define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i8: +define @test_vloxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: 
vloxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv2f64_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i16: +define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i16: +define @test_vloxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv4f16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i16: +define 
@test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i16: +define @test_vloxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv4f16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i8: +define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i8: +define @test_vloxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv4f16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i32: +define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i32: +define @test_vloxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv4f16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i16: +define 
@test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i16: +define @test_vloxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv4f16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i8: +define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i8: +define @test_vloxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv4f16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i32: +define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i32: +define @test_vloxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv4f16_nxv4i16(ptr %base, %index, i32 
%vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i16: +define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i16: +define @test_vloxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv4f16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i8: +define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg4_mask_nxv4f16_nxv4i8: +define @test_vloxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv4f16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i32: +define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i32: +define @test_vloxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv4f16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i16: +define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i16: +define @test_vloxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv4f16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i8: +define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", 
, 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i8: +define @test_vloxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv4f16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i32: +define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i32: +define @test_vloxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, 
ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv4f16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i16: +define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i16: +define @test_vloxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv4f16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i8: +define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, m1, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i8: +define @test_vloxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv4f16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i32: +define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i32: +define 
@test_vloxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv4f16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i16: +define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i16: +define @test_vloxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 
5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv4f16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i8: +define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i8: +define @test_vloxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv4f16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i32: +define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, 
a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i32: +define @test_vloxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv4f16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i16: +define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i16: +define @test_vloxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv4f16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i8: +define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i8: +define @test_vloxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 
5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv4f16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i32: +define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i32: +define @test_vloxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2f16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i32: +define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: 
vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i32: +define @test_vloxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2f16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i8: +define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i8: +define @test_vloxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv2f16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i16: +define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i16: +define @test_vloxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2f16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i32: +define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i32: +define @test_vloxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2f16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i8: +define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i8: +define @test_vloxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 
; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv2f16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i16: +define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i16: +define @test_vloxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv2f16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i32: +define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v 
v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i32: +define @test_vloxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv2f16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i8: +define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i8: +define @test_vloxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv2f16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i16: +define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i16: +define @test_vloxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv2f16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i32: +define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i32: +define @test_vloxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv2f16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i8: +define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i8: +define @test_vloxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16(,,,,, ptr, , , i32, i32) -define @test_vloxseg5_nxv2f16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i16: +define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i16: +define @test_vloxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv2f16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i32: +define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i32: +define @test_vloxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv2f16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i8: +define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = 
extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i8: +define @test_vloxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16(,,,,,, ptr, , , i32, i32) -define @test_vloxseg6_nxv2f16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i16: +define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i16: +define @test_vloxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: 
vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv2f16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i32: +define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i32: +define @test_vloxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv2f16_nxv2i8(ptr %base, %index, i32 
%vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i8: +define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i8: +define @test_vloxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16(,,,,,,, ptr, , , i32, i32) -define @test_vloxseg7_nxv2f16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i16: +define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define 
@test_vloxseg7_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i16: +define @test_vloxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv2f16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i32: +define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i32: +define @test_vloxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv2f16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i8: +define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i8: +define @test_vloxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, ptr, , , i32, i32) -define @test_vloxseg8_nxv2f16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i16: +define 
@test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i16: +define @test_vloxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv4f32_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i16: +define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i16: +define @test_vloxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv4f32_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i8: +define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i8: +define @test_vloxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(,, ptr, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(,, ptr, , , i32, i32) -define @test_vloxseg2_nxv4f32_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i32: +define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i32: +define @test_vloxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv4f32_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i16: +define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i16: +define @test_vloxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv4f32_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i8: +define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i8: +define @test_vloxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(,,, ptr, , , i32, i32) -define @test_vloxseg3_nxv4f32_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i32: +define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i32: +define @test_vloxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv4f32_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i16: +define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16( undef, undef, undef, undef, ptr 
%base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i16: +define @test_vloxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv4f32_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i8: +define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i8: +define @test_vloxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; 
CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32(,,,, ptr, , , i32, i32) -define @test_vloxseg4_nxv4f32_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i32: +define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i32: +define @test_vloxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } + diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll index 627f514b47300..c308512753f2d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll @@ -1,18178 +1,18284 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh \ +; RUN: llc 
-mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i16>, i64)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i8>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64, i64)
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv16i16(ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i16:
+define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, <vscale x 16 x i16> %index, i64 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 3)
+  %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %0, i32 1)
+  ret <vscale x 1 x i8> %1
 }
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i16:
+define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv4r.v v4, v8
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vloxseg2ei8.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) undef, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 3)
+  %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %0, i32 1)
+  ret <vscale x 1 x i8> %1
 }
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i8>, i64)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i16>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64, i64)
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv16i8(ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i8:
+define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, <vscale x 16 x i8> %index, i64 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 3)
+  %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %0, i32 1)
+  ret <vscale x 1 x i8> %1
 }
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i8:
+define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv4r.v v4, v8
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vloxseg2ei16.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) undef, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 3)
+  %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %0, i32 1)
+  ret <vscale x 1 x i8> %1
 }
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i32>, i64)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i32>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64, i64)
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv16i32(ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i32:
+define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vloxseg2ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v20
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vloxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, <vscale x 16 x i32> %index, i64 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 3)
+  %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %0, i32 1)
+  ret <vscale x 1 x i8> %1
 }
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i32:
+define @test_vloxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i32_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i32: +define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i32: +define @test_vloxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i32_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i8: +define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i8: +define @test_vloxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i32_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i64: +define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i64: +define @test_vloxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i32_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i16: +define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i16: +define 
@test_vloxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv4i32_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i32: +define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i32: +define @test_vloxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, 
ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv4i32_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i8: +define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i8: +define @test_vloxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv4i32_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i64: +define 
@test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i64: +define @test_vloxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv4i32_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i16: +define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i16: +define @test_vloxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv4i32_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i32: +define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i32: +define @test_vloxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: 
vloxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv4i32_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i8: +define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i8: +define @test_vloxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64(,,,, ptr, , , i64, 
i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv4i32_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i64: +define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i64: +define @test_vloxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv4i32_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i16: +define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: 
vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i16: +define @test_vloxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv16i8_nxv16i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i16: +define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg2_mask_nxv16i8_nxv16i16: +define @test_vloxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv16i8_nxv16i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i8: +define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i8: +define @test_vloxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret 
%1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv16i8_nxv16i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i32: +define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i32: +define @test_vloxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv16i8_nxv16i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i16: +define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; 
CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i16: +define @test_vloxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv16i8_nxv16i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i8: +define @test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i8: +define @test_vloxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1.nxv32i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv16i8_nxv16i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i32: +define @test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i32: +define @test_vloxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1.nxv32i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 
3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv16i8_nxv16i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i16: +define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i16: +define @test_vloxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv16i8_nxv16i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i8: +define 
@test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i8: +define @test_vloxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv16i8_nxv16i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i32: +define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i32: +define @test_vloxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i64: +define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv1i64: +define @test_vloxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8, v0.t 
+; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i32: +define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv1i32: +define @test_vloxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i16: +define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv1i16: +define @test_vloxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i8: +define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv1i8: +define @test_vloxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i64: +define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i64: +define @test_vloxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: 
vloxseg3ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i32: +define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i32: +define @test_vloxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i16: +define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i16: +define @test_vloxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i8: +define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv1i64.nxv1i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i8: +define @test_vloxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i64: +define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i64: +define @test_vloxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i32: +define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i32: +define @test_vloxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, 
i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i16: +define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i16: +define @test_vloxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i8: +define 
@test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i8: +define @test_vloxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i64: +define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i64: +define @test_vloxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i32: +define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i32: +define @test_vloxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i16: +define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i16: +define @test_vloxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i8: +define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i8: +define @test_vloxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i64: +define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i64: +define @test_vloxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i32: +define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i32: +define @test_vloxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i16: +define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i16: 
+define @test_vloxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i8: +define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i8: +define @test_vloxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i64: +define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i64: +define @test_vloxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i32(,,,,,,, 
ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i32: +define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i32: +define @test_vloxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i16: +define 
@test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i16: +define @test_vloxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i8: +define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i8: +define @test_vloxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i64: +define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i64: +define @test_vloxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i32: +define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i32: +define @test_vloxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, 
(a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i16: +define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i16: +define @test_vloxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i8: +define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i8: +define @test_vloxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i64: +define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i64: +define @test_vloxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i32: +define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i32: +define @test_vloxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i16: +define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i16: +define @test_vloxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: 
ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i8: +define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i8: +define @test_vloxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i64: +define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i64: +define @test_vloxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i32: +define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32( undef, 
undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i32: +define @test_vloxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i16: +define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i16: +define @test_vloxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i8: +define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i8: +define @test_vloxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} 
@llvm.riscv.vloxseg4.nxv1i32.nxv1i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i64: +define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i64: +define @test_vloxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i32: +define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i32: +define @test_vloxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i16: +define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define 
@test_vloxseg4_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i16: +define @test_vloxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i8: +define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i8: +define @test_vloxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i64: +define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i64: +define @test_vloxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i32: +define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i32: +define @test_vloxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i16: +define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, 
a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i16: +define @test_vloxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i8: +define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i8: +define 
@test_vloxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i64: +define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i64: +define @test_vloxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i32: +define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i32: +define @test_vloxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16(,,,,,, 
ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i16: +define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i16: +define @test_vloxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i8: +define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i8: +define @test_vloxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i64: +define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i64: +define @test_vloxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i32: +define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i32: +define @test_vloxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i16: +define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i16: +define @test_vloxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i8: +define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i8: +define @test_vloxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i64: +define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i64: +define @test_vloxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i32: +define 
@test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i32: +define @test_vloxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i16: +define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, 
undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i16: +define @test_vloxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i8: +define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i8: +define 
@test_vloxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv8i16_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i16: +define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i16: +define @test_vloxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl, 
i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv8i16_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i8: +define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i8: +define @test_vloxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv8i16_nxv8i64(ptr %base, 
%index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i64: +define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i16_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i64: +define @test_vloxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv8i16_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i32: +define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i32: +define @test_vloxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv8i16_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i16: +define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i16: +define @test_vloxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16( 
%val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv8i16_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i8: +define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i8: +define @test_vloxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv8i16_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i64: +define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i64: +define @test_vloxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv8i16_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i32: +define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue 
{,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i32: +define @test_vloxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv8i16_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i16: +define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i16: +define @test_vloxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv8i16_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i8: +define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i8: +define @test_vloxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv8i16_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i64: +define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i64: +define @test_vloxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv8i16_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i32: +define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i32: +define @test_vloxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i8_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i32: +define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i32: +define @test_vloxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i8_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i8: +define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i8: +define @test_vloxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i8_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i64: +define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i64: +define @test_vloxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i8_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i16: 
+define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i16: +define @test_vloxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv4i8_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i32: +define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } 
-define @test_vloxseg3_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i32: +define @test_vloxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv4i8_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i8: +define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i8: +define @test_vloxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv4i8_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i64: +define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i64: +define @test_vloxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv4i8_nxv4i16(ptr %base, %index, 
i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i16: +define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i16: +define @test_vloxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv4i8_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i32: +define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) 
ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i32: +define @test_vloxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv4i8_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i8: +define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i8: +define @test_vloxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, ptr %base, %index, 
%mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv4i8_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i64: +define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i64: +define @test_vloxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define 
@test_vloxseg4_nxv4i8_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i16: +define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i16: +define @test_vloxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv4i8_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i32: +define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i32: +define @test_vloxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv4i8_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i8: +define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i8: +define @test_vloxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; 
CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv4i8_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i64: +define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i64: +define @test_vloxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, 
i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv4i8_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i16: +define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i16: +define @test_vloxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv4i8_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg6_nxv4i8_nxv4i32: +define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i32: +define @test_vloxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv4i8_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i8: +define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - 
ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i8: +define @test_vloxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv4i8_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i64: +define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i64: +define @test_vloxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv4i8_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i16: +define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i16: +define @test_vloxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - 
%1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv4i8_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i32: +define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i32: +define @test_vloxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 
8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv4i8_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i8: +define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i8: +define @test_vloxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv4i8_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i64: +define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i64: +define @test_vloxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv4i8_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i16: +define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i16: +define @test_vloxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv4i8_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i32: +define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i32: +define @test_vloxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: 
vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv4i8_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i8: +define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i8: +define @test_vloxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv4i8_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i64: +define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i64: +define @test_vloxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv4i8_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i16: +define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i16: +define @test_vloxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i64: +define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli 
zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i64: +define @test_vloxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i32: +define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i32: +define @test_vloxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i16: +define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i16: +define @test_vloxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} 
@llvm.riscv.vloxseg2.nxv1i16.nxv1i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i8: +define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vloxseg2_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i8: +define @test_vloxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i64: +define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i64( undef, undef, undef, ptr 
%base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vloxseg3_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i64: +define @test_vloxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i32: +define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vloxseg3_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i32: +define @test_vloxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), 
v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i16: +define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vloxseg3_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i16: +define @test_vloxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i8: +define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i8: +define @test_vloxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i64: +define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i64: +define @test_vloxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i32: +define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i32: +define @test_vloxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i16: +define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i16: +define @test_vloxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i8: +define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i8: +define @test_vloxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) 
-define @test_vloxseg5_nxv1i16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i64: +define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i64: +define @test_vloxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i32: +define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, ptr %base, 
%index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i32: +define @test_vloxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i16: +define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i16: +define @test_vloxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i8: +define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i8: +define @test_vloxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = 
extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i64: +define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i64: +define @test_vloxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i32: +define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i32: +define @test_vloxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i16: +define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i16: +define @test_vloxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i8: +define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, 
%index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i8: +define @test_vloxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i64: +define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i64: +define @test_vloxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 
-; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i32: +define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i32: +define @test_vloxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, 
%val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i16: +define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i16: +define @test_vloxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i8: +define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i8: +define @test_vloxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i64: +define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: 
vloxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i64: +define @test_vloxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i32: +define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, 
i32 1) ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i32: +define @test_vloxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i16: +define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i16: +define @test_vloxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 
-; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i8: +define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i8: +define @test_vloxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + 
%0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i32: +define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i32: +define @test_vloxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i8: 
+define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i8: +define @test_vloxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i16: +define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } 
-define @test_vloxseg2_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i16: +define @test_vloxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i64: +define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i64: +define @test_vloxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i32: +define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i32: +define @test_vloxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i32_nxv2i8(ptr 
%base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i8: +define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i8: +define @test_vloxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i16: +define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i16: +define @test_vloxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i64: +define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i64: +define @test_vloxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i32: +define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i32: +define @test_vloxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} 
@llvm.riscv.vloxseg4.nxv2i32.nxv2i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i8: +define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i8: +define @test_vloxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i16: +define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i16: +define @test_vloxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i64: +define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i64: +define @test_vloxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv2i32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i32: +define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i32: +define @test_vloxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv2i32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i8: +define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i8: +define @test_vloxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} 
@llvm.riscv.vloxseg5.nxv2i32.nxv2i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv2i32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i16: +define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i16: +define @test_vloxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv2i32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i64: +define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i64: +define @test_vloxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv2i32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i32: +define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, 
%index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i32: +define @test_vloxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv2i32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i8: +define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i8: +define @test_vloxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv2i32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i16: +define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i16: +define @test_vloxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv2i32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i64: +define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i64: +define @test_vloxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv2i32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i32: +define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i32: +define @test_vloxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv2i32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i8: +define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i8: +define @test_vloxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv2i32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i16: +define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i16: +define @test_vloxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv2i32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i64: +define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i64: +define @test_vloxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: 
vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv2i32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i32: +define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i32: +define @test_vloxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32( %val, %val, 
%val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv2i32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i8: +define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i8: +define @test_vloxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv2i32.nxv2i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv2i32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i16: +define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i16: +define @test_vloxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv2i32_nxv2i64(ptr %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i64: +define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i64: +define @test_vloxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv8i8_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i16: +define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv8i8.nxv8i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i16: +define @test_vloxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv8i8_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i8: +define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i8: +define @test_vloxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: 
vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv8i8_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i64: +define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i64: +define @test_vloxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", 
, 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv8i8_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i32: +define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i32: +define @test_vloxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv8i8_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i16: +define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16( undef, undef, undef, ptr %base, %index, i64 %vl) - 
%1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i16: +define @test_vloxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv8i8_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i8: +define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i8: +define @test_vloxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; 
CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv8i8_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i64: +define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i64: +define @test_vloxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(,,, ptr, , , i64, i64) +declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv8i8_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i32: +define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i32: +define @test_vloxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv8i8_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i16: +define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, 
v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i16: +define @test_vloxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv8i8_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i8: +define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i8: +define @test_vloxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv8i8_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i64: +define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i64: +define @test_vloxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv8i8_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i32: +define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i32: +define @test_vloxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define 
@test_vloxseg5_nxv8i8_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i16: +define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i16: +define @test_vloxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv8i8_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i8: +define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i8: +define @test_vloxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv8i8_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i64: +define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i64: +define @test_vloxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv8i8_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i32: +define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i32: +define @test_vloxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, 
%index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv8i8_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i16: +define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i16: +define @test_vloxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), 
ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv8i8_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i8: +define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i8: +define @test_vloxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv8i8_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i64: +define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i64( 
undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i64: +define @test_vloxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv8i8_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i32: +define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i32: +define @test_vloxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv8i8_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i16: +define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i16: +define @test_vloxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, 
%val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv8i8_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i8: +define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i8: +define @test_vloxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i64(,,,,,,, ptr, , i64) -declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv8i8_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i64: +define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i64: +define @test_vloxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv8i8_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i32: +define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i32: +define @test_vloxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv8i8_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i16: +define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i16: +define @test_vloxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv8i8_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i8: +define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i8: +define @test_vloxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vloxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv8i8_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i64: +define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i64: +define @test_vloxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: 
vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv8i8_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i32: +define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i32: +define @test_vloxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i64_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i32: +define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i64_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv4i32: +define @test_vloxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i64_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i8: +define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i64_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv4i8: +define @test_vloxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i64_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i64: +define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i64_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv4i64: +define 
@test_vloxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i64_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i16: +define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i64_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv4i16: +define @test_vloxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i32: +define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i32: +define @test_vloxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i8: +define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 
-; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i8: +define @test_vloxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i64: +define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i64: +define @test_vloxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv4i16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i16: +define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i16: +define @test_vloxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(,,, ptr, , i64) -declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv4i16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i32: +define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i32: +define @test_vloxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv4i16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i8: +define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, 
mf4, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i8: +define @test_vloxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv4i16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i64: +define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i64: +define @test_vloxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl, 
%mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv4i16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i16: +define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i16: +define @test_vloxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv4i16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i32: +define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i32: +define @test_vloxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv4i16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i8: +define 
@test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i8: +define @test_vloxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv4i16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i64: +define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i64: +define @test_vloxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv4i16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i16: +define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i16: +define @test_vloxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail 
call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv4i16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i32: +define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i32: +define @test_vloxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv4i16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i8: +define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i8: +define @test_vloxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv4i16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i64: +define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, 
a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i64: +define @test_vloxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv4i16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i16: +define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i16: +define 
@test_vloxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv4i16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i32: +define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i32: +define @test_vloxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 
; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv4i16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i8: +define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i8: +define @test_vloxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i64(,,,,,, ptr, , i64) -declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv4i16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i64: +define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i64: +define @test_vloxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv4i16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i16: +define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i16: +define @test_vloxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv4i16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i32: +define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i32: +define @test_vloxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv4i16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i8: +define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i8: +define @test_vloxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv4i16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i64: +define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i64: +define @test_vloxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv4i16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i16: +define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i16: +define @test_vloxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv4i16.nxv4i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv4i16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i32: +define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i32: +define @test_vloxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define 
@test_vloxseg8_nxv4i16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i8: +define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i8: +define @test_vloxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv4i16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i64: +define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v 
v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i64: +define @test_vloxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv4i16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i16: +define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i16: +define @test_vloxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i8_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i64: +define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i64: +define @test_vloxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg2ei64.v 
v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i8_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i32: +define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i32: +define @test_vloxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i8_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i16: +define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i16: +define @test_vloxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv1i8_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i8: +define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = 
extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i8: +define @test_vloxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i8_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i64: +define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i64: +define @test_vloxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli 
zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i8_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i32: +define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i32: +define @test_vloxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i8_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i16: +define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i16: +define @test_vloxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv1i8_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i8: +define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv1i8.nxv1i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i8: +define @test_vloxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i8_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i64: +define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i64: +define @test_vloxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i8_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i32: +define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i32: +define @test_vloxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = 
call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i8_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i16: +define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i16: +define @test_vloxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv1i8_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i8: +define 
@test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i8: +define @test_vloxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i8_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i64: +define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i64: +define @test_vloxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i8_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i32: +define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i32: +define @test_vloxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i8_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i16: +define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i16: +define @test_vloxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv1i8_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i8: +define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i8: +define @test_vloxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i8_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i64: +define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i64: +define @test_vloxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i8_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i32: +define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, 
v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i32: +define @test_vloxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i8_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i16: +define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i16: +define 
@test_vloxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv1i8_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i8: +define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i8: +define @test_vloxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i8_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i64: +define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i64: +define @test_vloxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(,,,,,,, ptr, , 
i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i8_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i32: +define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i32: +define @test_vloxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i8_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i16: +define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i16: +define @test_vloxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv1i8_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i8: +define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - 
ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i8: +define @test_vloxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i8_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i64: +define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i64: +define @test_vloxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i8_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i32: +define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i32: +define @test_vloxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, 
v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i8_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i16: +define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i16: +define @test_vloxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, 
i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv1i8_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i8: +define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i8: +define @test_vloxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i8_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i32: +define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i32: +define @test_vloxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i8_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i8: +define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i8: +define @test_vloxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i8_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i16: +define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i16: +define @test_vloxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i8_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i64: +define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i64: +define @test_vloxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i8_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i32: +define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i32: +define @test_vloxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i8_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i8: +define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 
- ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i8: +define @test_vloxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i8_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i16: +define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i16: +define @test_vloxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, 
(a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i8_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i64: +define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i64: +define @test_vloxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32(,,,, ptr, , , i64, i64) +declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i8_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i32: +define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i32: +define @test_vloxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i8_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i8: +define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i8: +define @test_vloxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i8_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i16: +define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i16: +define 
@test_vloxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i8_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i64: +define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i64: +define @test_vloxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = 
extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv2i8_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i32: +define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i32: +define @test_vloxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv2i8_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i8: +define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i8: +define @test_vloxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv2i8_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i16: +define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 
= tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i16: +define @test_vloxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv2i8_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i64: +define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i64: +define 
@test_vloxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv2i8_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i32: +define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i32: +define @test_vloxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail 
call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv2i8_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i8: +define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i8: +define @test_vloxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16(,,,,,, ptr, , , i64, 
i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv2i8_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i16: +define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i16: +define @test_vloxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv2i8_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i64: +define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i64: +define @test_vloxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv2i8_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i32: +define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i32: +define @test_vloxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv2i8_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i8: +define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i8: +define @test_vloxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv2i8_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i16: +define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i16: +define @test_vloxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv2i8_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i64: +define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i64: +define @test_vloxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv2i8_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i32: +define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i32: +define @test_vloxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv2i8_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i8: +define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i8: +define @test_vloxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv2i8_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i16: +define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i16: +define @test_vloxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv2i8_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i64: +define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i64: +define @test_vloxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vloxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv8i32_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i16: +define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i16: +define @test_vloxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 
8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv8i32_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i8: +define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i8: +define @test_vloxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv8i32_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i64: +define 
@test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i64: +define @test_vloxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv8i32_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i32: +define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 
2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i32: +define @test_vloxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv32i8_nxv32i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i16: +define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv32i8_nxv32i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv32i16: +define @test_vloxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv32i8_nxv32i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i8: +define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv32i8_nxv32i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv32i8: +define @test_vloxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i32: +define 
@test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i32: +define @test_vloxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i8: +define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i8: +define @test_vloxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i16: +define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i16: +define @test_vloxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) 
- %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i64: +define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i64: +define @test_vloxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i16_nxv2i32(ptr %base, %index, i64 
%vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i32: +define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i32: +define @test_vloxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i8: +define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i8: +define @test_vloxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i16: +define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i16: +define @test_vloxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v 
v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i64: +define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i64: +define @test_vloxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32(,,,, ptr, , , 
i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i32: +define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i32: +define @test_vloxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i8: +define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli 
zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i8: +define @test_vloxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i16: +define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i16: +define 
@test_vloxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i64: +define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i64: +define @test_vloxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - 
%1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv2i16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i32: +define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i32: +define @test_vloxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 
3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv2i16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i8: +define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i8: +define @test_vloxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv2i16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i16: +define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, 
a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i16: +define @test_vloxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vloxseg5_nxv2i16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i64: +define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2i16_nxv2i64( %val, 
ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i64: +define @test_vloxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv2i16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i32: +define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i32: +define @test_vloxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: 
vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv2i16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i8: +define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i8: +define @test_vloxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} 
@llvm.riscv.vloxseg6.nxv2i16.nxv2i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv2i16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i16: +define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i16: +define @test_vloxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg6_nxv2i16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i64: +define 
@test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i64: +define @test_vloxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv2i16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i32: +define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i32: +define @test_vloxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv2i16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i8: +define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i8: +define 
@test_vloxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv2i16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i16: +define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i16: +define @test_vloxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, 
v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vloxseg7_nxv2i16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i64: +define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i64: +define @test_vloxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, 
i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv2i16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i32: +define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i32: +define @test_vloxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv2i16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i8: +define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i8: +define @test_vloxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv2i16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i16: +define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i16: +define @test_vloxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vloxseg8_nxv2i16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i64: +define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, 
i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i64: +define @test_vloxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i64_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i32: +define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i64_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv2i32: +define @test_vloxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: 
vloxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i64_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i8: +define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i64_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv2i8: +define @test_vloxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i64_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i16: +define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i64_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv2i16: +define @test_vloxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vloxseg2_nxv2i64_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i64: +define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2i64_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv2i64: +define @test_vloxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i64_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i32: +define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i64_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i32: +define @test_vloxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v 
v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i64_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i8: +define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i64_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i8: +define @test_vloxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} 
@llvm.riscv.vloxseg3.nxv2i64.nxv2i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i64_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i16: +define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i64_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i16: +define @test_vloxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vloxseg3_nxv2i64_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i64: +define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2i64_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i64: +define @test_vloxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i64_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i32: +define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i64_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; 
CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i32: +define @test_vloxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i64_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i8: +define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i64_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i8: +define @test_vloxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i64_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i16: +define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i64_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i16: +define @test_vloxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vloxseg4_nxv2i64_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i64: +define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2i64_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i64: +define @test_vloxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv16f16_nxv16i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i16: +define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv16i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i16: +define @test_vloxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv16f16_nxv16i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i8: +define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv16i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i8: +define @test_vloxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv16f16_nxv16i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i32: +define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv16i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i32: +define @test_vloxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv4f64_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i32: +define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i32: +define @test_vloxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv4f64_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i8: +define @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i8: +define @test_vloxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
<vscale x 2 x half> @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %0, i32 1)
+  ret <vscale x 2 x half> %1
+}
+
+
+define <vscale x 2 x half> @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4)
+  %1 = call <vscale x 2 x half> @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %0, i32 1)
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vloxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vloxseg2ei16.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) undef, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 2 x half> @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %0, i32 1)
+  ret <vscale x 2 x half> %1
+}
+
+
+define <vscale x 2 x half> @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vloxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4)
+  %1 = call <vscale x 2 x half> @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %0, i32 1)
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vloxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vloxseg2ei32.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) undef, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 2 x half> @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %0, i32 1)
+  ret <vscale x 2 x half> %1
+}
+
+
+define <vscale x 2 x half> @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vloxseg2ei64.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4)
+  %1 = call <vscale x 2 x half> @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %0, i32 1)
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vloxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vloxseg2ei64.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) undef, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 2 x half> @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %0, i32 1)
+  ret <vscale x 2 x half> %1
+}
+
+
+define <vscale x 4 x half> @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4)
+  %1 = call <vscale x 4 x half> @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0, i32 1)
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vloxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vloxseg2ei8.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 4 x half> @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0, i32 1)
+  ret <vscale x 4 x half> %1
+}
+
+
+define <vscale x 4 x half> @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4)
+  %1 = call <vscale x 4 x half> @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0, i32 1)
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vloxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vloxseg2ei16.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 4 x half>
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = 
call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv4f64_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i64: +define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i64: +define @test_vloxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv4f64_nxv4i16(ptr %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i16: +define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i16: +define @test_vloxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv1f64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i64: +define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i64: +define @test_vloxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv1f64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i32: +define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i32: +define @test_vloxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv1f64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i16: +define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i16: +define @test_vloxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv1f64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i8: +define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define 
@test_vloxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i8: +define @test_vloxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv1f64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i64: +define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i64: +define @test_vloxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv1f64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i32: +define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: 
vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, 
e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i32: +define @test_vloxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv1f64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i16: +define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i16: +define @test_vloxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: 
ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv1f64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i8: +define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i8: +define @test_vloxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv1f64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i64: +define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli 
zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i64: +define @test_vloxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv1f64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i32: +define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i32: +define @test_vloxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: 
vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv1f64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i16: +define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i16: +define @test_vloxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv1f64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg4_nxv1f64_nxv1i8: +define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i8: +define @test_vloxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv1f64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i64: +define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg5_mask_nxv1f64_nxv1i64: +define @test_vloxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv1f64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i32: +define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i32: +define @test_vloxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 
3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv1f64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i16: +define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i16: +define @test_vloxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv1f64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i8: +define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue 
{,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i8: +define @test_vloxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv1f64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i64: +define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i64: +define @test_vloxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv1f64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i32: +define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i32: +define @test_vloxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv1f64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i16: +define 
@test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i16: +define @test_vloxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv1f64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i8: +define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) 
+ ret %1 } -define @test_vloxseg6_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i8: +define @test_vloxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv1f64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i64: +define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i64: +define @test_vloxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv1f64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i32: +define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i32: +define @test_vloxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv1f64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i16: +define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 
%vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i16: +define @test_vloxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv1f64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i8: +define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { 
-; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i8: +define @test_vloxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv1f64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i64: +define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i64: +define @test_vloxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, 
%val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv1f64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i32: +define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i32: +define @test_vloxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv1f64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i16: +define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i16: +define @test_vloxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv1f64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i8: +define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv1i8( 
%val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i8: +define @test_vloxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv2f32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i32: +define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i32: +define @test_vloxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv2f32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i8: +define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i8: +define @test_vloxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv2f32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i16: +define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + 
ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i16: +define @test_vloxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv2f32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i64: +define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i64: +define @test_vloxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(,,, 
ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv2f32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i32: +define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i32: +define @test_vloxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv2f32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i8: +define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, 
i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i8: +define @test_vloxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv2f32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i16: +define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i16: +define @test_vloxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv2f32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i64: +define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i64: +define @test_vloxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv2f32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i32: +define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, 
i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i32: +define @test_vloxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv2f32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i8: +define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i8: +define @test_vloxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail 
call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv2f32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i16: +define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i16: +define @test_vloxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv2f32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i64: +define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} 
%0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i64: +define @test_vloxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv2f32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i32: +define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i32: +define @test_vloxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, 
ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv2f32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i8: +define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i8: +define @test_vloxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv2f32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i16: +define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i16: +define @test_vloxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv2f32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i64: +define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i64: +define @test_vloxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; 
CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv2f32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i32: +define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i32: +define @test_vloxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8(,,,,,, ptr, , , i64, i64) -define 
@test_vloxseg6_nxv2f32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i8: +define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i8: +define @test_vloxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv2f32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i16: +define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define 
@test_vloxseg6_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i16: +define @test_vloxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv2f32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i64: +define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vloxseg6_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i64: +define @test_vloxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv2f32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i32: +define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vloxseg7_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i32: +define @test_vloxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv2f32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i8: +define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vloxseg7_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i8: +define @test_vloxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv2f32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i16: +define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i16: +define @test_vloxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv2f32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i64: +define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i64: +define @test_vloxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv2f32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i32: +define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i32: +define @test_vloxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv2f32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i8: +define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv2f32.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i8: +define @test_vloxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv2f32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i16: +define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i16: +define @test_vloxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv2f32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i64: +define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i64: +define @test_vloxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv1f16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i64: +define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i64: +define @test_vloxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv1f16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i32: +define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i32: +define @test_vloxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv1f16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i16: +define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i16: +define @test_vloxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + 
ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv1f16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i8: +define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i8: +define @test_vloxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv1f16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i64: +define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 
%vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i64: +define @test_vloxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv1f16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i32: +define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i32: +define @test_vloxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} 
@llvm.riscv.vloxseg3.nxv1f16.nxv1i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv1f16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i16: +define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i16: +define @test_vloxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv1f16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i8: +define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define 
@test_vloxseg3_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i8: +define @test_vloxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv1f16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i64: +define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i64: +define @test_vloxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv1f16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i32: +define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i32: +define @test_vloxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv1f16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i16: +define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i16: +define @test_vloxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv1f16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i8: +define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i8: +define @test_vloxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv1f16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i64: +define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i64: +define @test_vloxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv1f16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i32: +define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: 
vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i32: +define @test_vloxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv1f16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i16: +define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i16: +define @test_vloxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv1f16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i8: +define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i8: +define @test_vloxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare 
{,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv1f16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i64: +define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i64: +define @test_vloxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv1f16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i32: +define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i32: +define @test_vloxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv1f16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i16: +define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i16: +define @test_vloxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), 
v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv1f16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i8: +define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i8: +define @test_vloxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv1f16_nxv1i64(ptr %base, %index, 
i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i64: +define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i64: +define @test_vloxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv1f16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i32: +define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, 
%index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i32: +define @test_vloxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv1f16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i16: +define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i16: +define @test_vloxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: 
vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv1f16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i8: +define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i8: +define @test_vloxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv1f16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i64: +define 
@test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i64: +define @test_vloxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv1f16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i32: +define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i32: +define @test_vloxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv1f16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i16: +define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i16: +define @test_vloxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv1f16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i8: +define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i8: +define @test_vloxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i64(,, ptr, , i64) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv1f32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i64: +define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i64: +define @test_vloxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv1f32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i32: +define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; 
CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i32: +define @test_vloxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv1f32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i16: +define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i16: +define @test_vloxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv1f32_nxv1i8(ptr %base, %index, i64 
%vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i8: +define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i8: +define @test_vloxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv1f32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i64: +define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i64: +define @test_vloxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv1f32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i32: +define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i32: +define @test_vloxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv1f32_nxv1i16(ptr %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i16: +define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i16: +define @test_vloxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv1f32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i8: +define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i8: +define 
@test_vloxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv1f32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i64: +define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i64: +define @test_vloxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", 
, 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv1f32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i32: +define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i32: +define @test_vloxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv1f32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i16: +define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, 
ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i16: +define @test_vloxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv1f32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i8: +define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i8: +define @test_vloxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 
1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv1f32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i64: +define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i64: +define @test_vloxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv1f32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i32: +define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: 
vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i32: +define @test_vloxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv1f32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i16: +define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i16: +define @test_vloxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, 
v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv1f32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i8: +define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i8: +define @test_vloxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv1f32_nxv1i64(ptr %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i64: +define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i64: +define @test_vloxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv1f32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i32: +define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 
} -define @test_vloxseg6_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i32: +define @test_vloxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv1f32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i16: +define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i16: +define @test_vloxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = 
extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv1f32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i8: +define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i8: +define @test_vloxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv1f32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i64: +define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, 
e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i64: +define @test_vloxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv1f32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i32: +define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i32: +define @test_vloxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv1f32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i16: +define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i16: +define @test_vloxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv1f32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i8: +define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i8: +define @test_vloxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv1f32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i64: +define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: 
vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i64: +define @test_vloxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv1f32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i32: +define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i32: +define @test_vloxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv1f32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i16: +define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i16: +define @test_vloxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv1f32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i8: +define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i8: +define @test_vloxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv8f16_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i16: +define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v10, 
(a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i16: +define @test_vloxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv8f16_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i8: +define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i8: +define @test_vloxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg2ei64.v 
v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv8f16_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i64: +define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i64: +define @test_vloxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv8f16_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i32: +define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; 
CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i32: +define @test_vloxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv8f16_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i16: +define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i16: +define @test_vloxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), 
v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv8f16_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i8: +define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i8: +define @test_vloxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv8f16_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i64: +define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8 -; 
CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i64: +define @test_vloxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv8f16_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i32: +define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i32: +define @test_vloxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t 
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv8f16_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i16: +define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i16: +define @test_vloxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv8f16_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i8: +define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i8: +define @test_vloxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv8f16_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i64: +define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i64: +define @test_vloxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vloxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv8f16_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i32: +define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i32: +define @test_vloxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(,, ptr, , , i64, i64) 
-define @test_vloxseg2_nxv8f32_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i16: +define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i16: +define @test_vloxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv8f32_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i8: +define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i8: +define 
@test_vloxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv8f32_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i64: +define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i64: +define @test_vloxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv8f32_nxv8i32(ptr %base, 
%index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i32: +define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i32: +define @test_vloxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv2f64_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i32: +define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i32: +define 
@test_vloxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv2f64_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i8: +define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i8: +define @test_vloxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv2f64_nxv2i16(ptr %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i16: +define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i16: +define @test_vloxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv2f64_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i64: +define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i64: +define @test_vloxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv2f64_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i32: +define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i32: +define @test_vloxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv2f64_nxv2i8(ptr %base, %index, i64 
%vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i8: +define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i8: +define @test_vloxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv2f64_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i16: +define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i16: 
+define @test_vloxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv2f64_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i64: +define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i64: +define @test_vloxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} 
@llvm.riscv.vloxseg4.nxv2f64.nxv2i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv2f64_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i32: +define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i32: +define @test_vloxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv2f64_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i8: +define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, 
%index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i8: +define @test_vloxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv2f64_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i16: +define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i16: +define @test_vloxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, 
i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv2f64_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i64: +define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i64: +define @test_vloxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv4f16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i32: +define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: 
vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i32: +define @test_vloxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv4f16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i8: +define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i8: +define @test_vloxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8( %val, %val, 
ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv4f16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i64: +define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i64: +define @test_vloxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv4f16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i16: +define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv4f16.nxv4i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i16: +define @test_vloxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv4f16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i32: +define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i32: +define @test_vloxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32( %val, 
%val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv4f16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i8: +define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i8: +define @test_vloxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv4f16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i64: +define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: 
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i64: +define @test_vloxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv4f16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i16: +define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i16: +define @test_vloxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv4f16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i32: +define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i32: +define @test_vloxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv4f16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i8: +define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: 
vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i8: +define @test_vloxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv4f16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i64: +define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i64: +define @test_vloxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, 
ta, mu -; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv4f16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i16: +define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i16: +define @test_vloxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv4f16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i32: +define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i32: +define @test_vloxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv4f16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i8: +define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i8: +define @test_vloxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv4f16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i64: +define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i64: +define @test_vloxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare 
{,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv4f16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i16: +define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i16: +define @test_vloxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv4f16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i32: +define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i32: +define @test_vloxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv4f16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i8: +define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i8: +define @test_vloxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv4f16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i64: +define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i64: +define @test_vloxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv4f16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i16: +define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i16: +define @test_vloxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv4f16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i32: +define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg7_mask_nxv4f16_nxv4i32: +define @test_vloxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv4f16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i8: +define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i8: +define @test_vloxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue 
{,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv4f16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i64: +define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i64: +define @test_vloxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv4f16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i16: +define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, m1, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i16: +define @test_vloxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv4f16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i32: +define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i32: +define 
@test_vloxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv4f16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i8: +define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i8: +define @test_vloxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, 
%index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv4f16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i64: +define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i64: +define @test_vloxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv4f16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i16: +define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i16: +define @test_vloxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv2f16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i32: +define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg2_mask_nxv2f16_nxv2i32: +define @test_vloxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv2f16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i8: +define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i8: +define @test_vloxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv2f16_nxv2i16(ptr %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i16: +define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i16: +define @test_vloxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv2f16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i64: +define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i64: +define @test_vloxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vloxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v10, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val, <vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) undef, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 4 x bfloat> @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %0, i32 1)
+  ret <vscale x 4 x bfloat> %1
 }
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i64)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
-define <vscale x 2 x half> @test_vloxseg3_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i32:
+define <vscale x 4 x bfloat> @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vloxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vloxseg5ei64.v v12, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4)
+  %1 = call <vscale x 4 x bfloat> @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %0, i32 1)
+  ret <vscale x 4 x bfloat> %1
+}
+
+define <vscale x 4 x bfloat> @test_vloxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vloxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vloxseg5ei64.v v12, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) undef, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 4 x bfloat> @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %0, i32 1)
+  ret <vscale x 4 x bfloat> %1
+}
+
+
+define <vscale x 1 x bfloat> @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vloxseg6ei8.v v9, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4)
+  %1 = call <vscale x 1 x bfloat> @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %0, i32 1)
+  ret <vscale x 1 x bfloat> %1
 }
-define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
-;
CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i32: +define @test_vloxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv2f16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i8: +define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i8: +define @test_vloxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} 
@llvm.riscv.vloxseg3.nxv2f16.nxv2i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv2f16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i16: +define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i16: +define @test_vloxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv2f16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i64: +define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i64: +define @test_vloxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv2f16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i32: +define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i32: +define @test_vloxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr 
%base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv2f16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i8: +define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i8: +define @test_vloxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv2f16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i16: +define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", 
, 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i16: +define @test_vloxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv2f16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i64: +define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i64: +define @test_vloxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv2f16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i32: +define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i32: +define @test_vloxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv2f16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i8: +define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i8: +define @test_vloxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv2f16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i16: +define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i16: +define @test_vloxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: 
vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64(,,,,, ptr, , , i64, i64) -define @test_vloxseg5_nxv2f16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i64: +define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vloxseg5_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i64: +define @test_vloxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv2f16_nxv2i32(ptr %base, %index, i64 %vl) 
{ -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i32: +define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i32: +define @test_vloxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv2f16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i8: +define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret 
%1 } -define @test_vloxseg6_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i8: +define @test_vloxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv2f16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i16: +define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i16: +define @test_vloxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 
%vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64(,,,,,, ptr, , , i64, i64) -define @test_vloxseg6_nxv2f16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i64: +define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg6_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i64: +define @test_vloxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv2f16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i32: +define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; 
CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i32: +define @test_vloxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv2f16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i8: +define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i8: +define @test_vloxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv2f16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i16: +define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i16: +define @test_vloxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64(,,,,,,, ptr, , , i64, i64) -define @test_vloxseg7_nxv2f16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i64: +define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i64: +define @test_vloxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv2f16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i32: +define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i32: +define @test_vloxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv2f16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i8: +define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i8: +define @test_vloxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; 
CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv2f16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i16: +define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i16: +define @test_vloxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64(,,,,,,,, ptr, , , i64, i64) -define @test_vloxseg8_nxv2f16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i64: +define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i64: +define @test_vloxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv4f32_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i32: +define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i32: +define @test_vloxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv4f32_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i8: +define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i8: +define @test_vloxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = 
extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i64(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv4f32_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i64: +define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i64: +define @test_vloxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(,, ptr, , , i64, i64) -define @test_vloxseg2_nxv4f32_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i16: +define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16( undef, undef, ptr %base, %index, i64 
%vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i16: +define @test_vloxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv4f32_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i32: +define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i32: +define @test_vloxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = 
extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv4f32_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i8: +define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i8: +define @test_vloxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv4f32_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i64: +define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i64: +define @test_vloxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(,,, ptr, , , i64, i64) -define @test_vloxseg3_nxv4f32_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i16: +define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i16: +define @test_vloxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv4f32_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i32: +define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i32: +define @test_vloxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv4f32_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i8: +define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: 
vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i8: +define @test_vloxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv4f32_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i64: +define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4f32_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i64: +define @test_vloxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16(,,,, ptr, , , i64, i64) -define @test_vloxseg4_nxv4f32_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i16: +define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vloxseg4_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i16: +define @test_vloxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vloxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } + diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll index 1e355b1b20c51..b96874fe90982 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll
@@ -1,4335 +1,4247 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh \
+; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \
 ; RUN: -verify-machineinstrs < %s | FileCheck %s
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr , i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i1>, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i1>, i32, i32, i32)
-define <vscale x 16 x i16> @test_vlseg2_nxv16i16(ptr %base, i32 %vl) {
-; CHECK-LABEL: test_vlseg2_nxv16i16:
+define <vscale x 1 x i8> @test_vlseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vlseg2e16.v v4, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vlseg2e8.v v7, (a0)
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %vl)
- %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
- ret <vscale x 16 x i16> %1
+ %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) undef, ptr %base, i32 %vl, i32 3)
+ %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %0, i32 1)
+ ret <vscale x 1 x i8> %1
 }
-define <vscale x 16 x i16> @test_vlseg2_mask_nxv16i16(ptr %base, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vlseg2_mask_nxv16i16:
+define <vscale x 1 x i8> @test_vlseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vlseg2e16.v v4, (a0)
-; CHECK-NEXT: vmv4r.v v8, v4
-; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %vl)
- %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
- %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.mask.nxv16i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
- %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
- ret <vscale x 16 x i16> %3
+ %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) undef, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1, i32 3)
+ %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %0, i32 1)
+ ret <vscale x 1 x i8> %1
 }
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i1>, i32, i32, i32)
-define <vscale x 1 x i8> @test_vlseg2_nxv1i8(ptr %base, i32 %vl) {
-; CHECK-LABEL: test_vlseg2_nxv1i8:
+define <vscale x 2 x i8> @test_vlseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+;
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv1i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv1i8: +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) + +define @test_vlseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg2e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i8( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv1i8(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1i8(,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define 
@test_vlseg3_nxv1i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv1i8: +define @test_vlseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv1i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv1i8: +define @test_vlseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg3e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i8( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv1i8(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i8(,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg4_nxv1i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv1i8: +define @test_vlseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vlseg2e8.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv1i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv1i8: +define @test_vlseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr 
%base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg4e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vlseg2e8.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i8( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1i8(,,,,, ptr , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8(,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg5_nxv1i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg5_nxv1i8: +define @test_vlseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vlseg2e8.v v4, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv1i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv1i8: +define @test_vlseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg5e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vlseg2e8.v v4, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8( %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = 
call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1i8(,,,,,, ptr , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8(,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg6_nxv1i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg6_nxv1i8: +define @test_vlseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0) +; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vlseg6_mask_nxv1i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv1i8: +define @test_vlseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg6e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(,,,,,,, ptr , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8(,,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg7_nxv1i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg7_nxv1i8: +define @test_vlseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, 
ma +; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv1i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv1i8: +define @test_vlseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg7e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8(,,,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg8_nxv1i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg8_nxv1i8: +define @test_vlseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv1i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv1i8: +define @test_vlseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg8e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv16i8(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv16i8(,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg2_nxv16i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv16i8: +define @test_vlseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vlseg2e8.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv16i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv16i8: +define @test_vlseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vlseg2e8.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlseg2e8.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i8( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 
} -declare {,,} @llvm.riscv.vlseg3.nxv16i8(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv16i8(,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg3_nxv16i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv16i8: +define @test_vlseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg3e8.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vlseg3_mask_nxv16i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv16i8: +define @test_vlseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vlseg3e8.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv16i8( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv16i8(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv16i8(,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg4_nxv16i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv16i8: +define @test_vlseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vlseg4e8.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 
%vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv16i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv16i8: +define @test_vlseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vlseg4e8.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv16i8( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv2i32(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv2i32(,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg2_nxv2i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv2i32: +define @test_vlseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg2e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv2i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv2i32: +define @test_vlseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg2e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i32( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv2i32(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2i32(,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg3_nxv2i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv2i32: +define @test_vlseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv2i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv2i32: +define @test_vlseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg3e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i32( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv2i32(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i32(,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg4_nxv2i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv2i32: +define @test_vlseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0) +; 
CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv2i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv2i32: +define @test_vlseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg4e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i32( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2i32(,,,,, ptr , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32(,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg5_nxv2i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg5_nxv2i32: +define @test_vlseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vlseg4e8.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv2i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv2i32: +define @test_vlseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg5e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: 
vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32( %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2i32(,,,,,, ptr , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32(,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) -define @test_vlseg6_nxv2i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg6_nxv2i32: +define @test_vlseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg6e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv2i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv2i32: +define @test_vlseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg6e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} 
@llvm.riscv.vlseg7.nxv2i32(,,,,,,, ptr , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32(,,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) -define @test_vlseg7_nxv2i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg7_nxv2i32: +define @test_vlseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg7e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv2i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv2i32: +define @test_vlseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg7e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32(,,,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) -define @test_vlseg8_nxv2i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg8_nxv2i32: +define @test_vlseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 
+; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv2i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv2i32: +define @test_vlseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg8e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv4i16(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv4i16(,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) -define @test_vlseg2_nxv4i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv4i16: +define @test_vlseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg2e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv4i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv4i16: +define @test_vlseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i16( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv4i16(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4i16(,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) -define @test_vlseg3_nxv4i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv4i16: +define @test_vlseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv4i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv4i16: +define @test_vlseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i16( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv4i16(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i16(,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vlseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) -define @test_vlseg4_nxv4i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv4i16: +define @test_vlseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv4i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv4i16: +define @test_vlseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg4e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i16( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv4i16(,,,,, ptr , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16(,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) -define @test_vlseg5_nxv4i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg5_nxv4i16: +define @test_vlseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 
6) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv4i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv4i16: +define @test_vlseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg5e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16( %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv4i16(,,,,,, ptr , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16(,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) -define @test_vlseg6_nxv4i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg6_nxv4i16: +define @test_vlseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv4i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv4i16: +define @test_vlseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16( undef, undef, 
undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(,,,,,,, ptr , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16(,,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) -define @test_vlseg7_nxv4i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg7_nxv4i16: +define @test_vlseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) + +define @test_vlseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: 
vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) + +define @test_vlseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) + +define @test_vlseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vlseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) + +define @test_vlseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg8e8.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) + +define @test_vlseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg8e8.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) + +define @test_vlseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; 
CHECK-NEXT: vlseg8e8.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) + +define @test_vlseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg8e8.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) + +define @test_vlseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, 
(a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) + +define @test_vlseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) + +define @test_vlseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) + +define @test_vlseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; 
CHECK-NEXT: vlseg2e16.v v6, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) + +define @test_vlseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16.v v4, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) + +define @test_vlseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) + +define @test_vlseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) + +define @test_vlseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) + +define @test_vlseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16.v v6, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) + +define @test_vlseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vlseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) + +define @test_vlseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vlseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr 
%base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) + +define @test_vlseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vlseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) + +define @test_vlseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16.v v6, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vlseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) + +define @test_vlseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) 
undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vlseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) + +define @test_vlseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vlseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) + +define @test_vlseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vlseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) + +define @test_vlseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vlseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) + +define @test_vlseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vlseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) + +define @test_vlseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vlseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) + +define @test_vlseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) + +define @test_vlseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + 
+declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) + +define @test_vlseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) + +define @test_vlseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) + +define @test_vlseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define 
@test_vlseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) + +define @test_vlseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) + +define @test_vlseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) + +define @test_vlseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) + +define @test_vlseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg2e32.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv4i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv4i16: +define @test_vlseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16(,,,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg8_nxv4i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg8_nxv4i16: +define @test_vlseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv4i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv4i16: +define @test_vlseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1i32(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv1i32(,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg2_nxv1i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv1i32: +define @test_vlseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg2e32.v v7, (a0) +; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32( undef, undef, ptr 
%base, i32 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vlseg2_mask_nxv1i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv1i32: +define @test_vlseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg2e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i32( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv1i32(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1i32(,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg3_nxv1i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv1i32: +define @test_vlseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv1i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv1i32: +define @test_vlseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg3e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i32( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv1i32(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i32(,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg4_nxv1i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv1i32: +define @test_vlseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv1i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv1i32: +define @test_vlseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg4e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i32( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1i32(,,,,, ptr , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32(,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg5_nxv1i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg5_nxv1i32: +define @test_vlseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0) +; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = 
tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) ret %1 } -define @test_vlseg5_mask_nxv1i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv1i32: +define @test_vlseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg5e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32( %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1i32(,,,,,, ptr , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32(,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg6_nxv1i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg6_nxv1i32: +define @test_vlseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg6e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv1i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv1i32: +define @test_vlseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg6e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32( undef, undef, undef, 
undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(,,,,,,, ptr , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32(,,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg7_nxv1i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg7_nxv1i32: +define @test_vlseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg7e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg4e32.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv1i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv1i32: +define @test_vlseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg7e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32(,,,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) -define @test_vlseg8_nxv1i32(ptr %base, i32 %vl) { -; CHECK-LABEL: 
test_vlseg8_nxv1i32: +define @test_vlseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0) +; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vlseg8_mask_nxv1i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv1i32: +define @test_vlseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg8e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv8i16(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv8i16(,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) -define @test_vlseg2_nxv8i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv8i16: +define @test_vlseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg2e16.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv8i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv8i16: +define 
@test_vlseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg2e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i16( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv8i16(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv8i16(,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) -define @test_vlseg3_nxv8i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv8i16: +define @test_vlseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg3e16.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv8i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv8i16: +define @test_vlseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg3e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i16( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv8i16(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i16(,,,, ptr, , i32, i32) +declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) -define @test_vlseg4_nxv8i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv8i16: +define @test_vlseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg4e16.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv8i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv8i16: +define @test_vlseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg4e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i16( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv8i8(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv8i8(,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) -define @test_vlseg2_nxv8i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv8i8: +define @test_vlseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg2e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv8i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv8i8: +define 
@test_vlseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg2e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i8( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv8i8(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv8i8(,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) -define @test_vlseg3_nxv8i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv8i8: +define @test_vlseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv8i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv8i8: +define @test_vlseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg3e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i8( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv8i8(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i8(,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) -define @test_vlseg4_nxv8i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv8i8: +define @test_vlseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv8i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv8i8: +define @test_vlseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg4e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i8( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv8i8(,,,,, ptr , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8(,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) -define @test_vlseg5_nxv8i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg5_nxv8i8: +define @test_vlseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv8i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv8i8: +define 
@test_vlseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg5e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8( %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv8i8(,,,,,, ptr , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8(,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg6_nxv8i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg6_nxv8i8: +define @test_vlseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg2e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv8i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv8i8: +define @test_vlseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg6e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 
6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(,,,,,,, ptr , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8(,,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg7_nxv8i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg7_nxv8i8: +define @test_vlseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg2e64.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv8i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv8i8: +define @test_vlseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg7e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8(,,,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg8_nxv8i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg8_nxv8i8: +define @test_vlseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vlseg2e64.v v4, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8( undef, undef 
, undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
+ %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 6)
+ %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1)
+ ret %1
 }
-define @test_vlseg8_mask_nxv8i8(ptr %base, i32 %vl, %mask) {
-; CHECK-LABEL: test_vlseg8_mask_nxv8i8:
+define @test_vlseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: vmv1r.v v8, v7
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
-; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
+ %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6)
+ %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1)
+ ret %1
 }
-declare {,} @llvm.riscv.vlseg2.nxv8i32(,, ptr , i32)
-declare {,} @llvm.riscv.vlseg2.mask.nxv8i32(,, ptr, , i32, i32)
+declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32)
-define @test_vlseg2_nxv8i32(ptr %base, i32 %vl) {
-; CHECK-LABEL: test_vlseg2_nxv8i32:
+define @test_vlseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vlseg2e32.v v4, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vlseg3e64.v v7, (a0)
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32( undef, undef, ptr %base, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
+ %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 6)
+ %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1)
+ ret %1
 }
-define @test_vlseg2_mask_nxv8i32(ptr %base, i32 %vl, %mask) {
-; CHECK-LABEL: test_vlseg2_mask_nxv8i32:
+define @test_vlseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vlseg2e32.v v4, (a0)
-; CHECK-NEXT: vmv4r.v v8, v4
-; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32( undef, undef, ptr %base, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i32( %1, %1, ptr %base, %mask, i32 %vl, i32 1)
- %3 = extractvalue {,} %2, 1
- ret %3
+ %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6)
+ %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1)
+ ret %1
 }
-declare {,} @llvm.riscv.vlseg2.nxv4i8(,, ptr , i32)
-declare {,} @llvm.riscv.vlseg2.mask.nxv4i8(,, ptr, , i32, i32)
+declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32)
-define @test_vlseg2_nxv4i8(ptr %base, i32 %vl) {
-; CHECK-LABEL: test_vlseg2_nxv4i8:
+define @test_vlseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vlseg2e8.v v7, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vlseg3e64.v v6, (a0)
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8( undef, undef, ptr %base, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
+ %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 6)
+ %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1)
+ ret %1
 }
-define @test_vlseg2_mask_nxv4i8(ptr %base, i32 %vl, %mask) {
-; CHECK-LABEL: test_vlseg2_mask_nxv4i8:
+define @test_vlseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: vmv1r.v v8, v7
-; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8( undef, undef, ptr %base, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i8( %1, %1, ptr %base, %mask, i32 %vl, i32 1)
- %3 = extractvalue {,} %2, 1
- ret %3
+ %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6)
+ %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1)
+ ret %1
 }
-declare {,,} @llvm.riscv.vlseg3.nxv4i8(,,, ptr , i32)
-declare {,,} @llvm.riscv.vlseg3.mask.nxv4i8(,,, ptr, , i32, i32)
+declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32)
-define @test_vlseg3_nxv4i8(ptr %base, i32 %vl) {
-; CHECK-LABEL: test_vlseg3_nxv4i8:
+define @test_vlseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vlseg3e8.v v7, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, 
ma +; CHECK-NEXT: vlseg4e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv4i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv4i8: +define @test_vlseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg3e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i8( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv4i8(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i8(,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg4_nxv4i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv4i8: +define @test_vlseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg4e64.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv4i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv4i8: +define @test_vlseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg4e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8( undef, undef, 
undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i8( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv4i8(,,,,, ptr , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8(,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) -define @test_vlseg5_nxv4i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg5_nxv4i8: +define @test_vlseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg5e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv4i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv4i8: +define @test_vlseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg5e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8( %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv4i8(,,,,,, ptr , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8(,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) -define @test_vlseg6_nxv4i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg6_nxv4i8: +define @test_vlseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg6e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv4i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv4i8: +define @test_vlseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg6e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(,,,,,,, ptr , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8(,,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) -define @test_vlseg7_nxv4i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg7_nxv4i8: +define @test_vlseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg7e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv4i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv4i8: +define @test_vlseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; 
CHECK-NEXT: vlseg7e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8(,,,,,,,, ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) -define @test_vlseg8_nxv4i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg8_nxv4i8: +define @test_vlseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg8e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv4i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv4i8: +define @test_vlseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg8e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1i16(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv1i16(,, ptr, , i32, i32) -define @test_vlseg2_nxv1i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv1i16: +define @test_vlseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv1i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv1i16: +define @test_vlseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i16( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv1i16(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1i16(,,, ptr, , i32, i32) -define @test_vlseg3_nxv1i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv1i16: +define @test_vlseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv1i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv1i16: +define @test_vlseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: 
vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i16( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv1i16(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i16(,,,, ptr, , i32, i32) -define @test_vlseg4_nxv1i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv1i16: +define @test_vlseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv1i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv1i16: +define @test_vlseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg4e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i16( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1i16(,,,,, ptr , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16(,,,,, ptr, , i32, i32) -define @test_vlseg5_nxv1i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg5_nxv1i16: +define @test_vlseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0) +; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv1i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv1i16: +define @test_vlseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg5e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16( %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1i16(,,,,,, ptr , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16(,,,,,, ptr, , i32, i32) -define @test_vlseg6_nxv1i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg6_nxv1i16: +define @test_vlseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv1i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv1i16: +define @test_vlseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16.v v4, 
(a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(,,,,,,, ptr , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16(,,,,,,, ptr, , i32, i32) -define @test_vlseg7_nxv1i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg7_nxv1i16: +define @test_vlseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0) +; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv1i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv1i16: +define @test_vlseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8_nxv1i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg8_nxv1i16: +define @test_vlseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, 
(a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv1i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv1i16: +define @test_vlseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv32i8(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv32i8(,, ptr, , i32, i32) -define @test_vlseg2_nxv32i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv32i8: +define @test_vlseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vlseg2e8.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv32i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv32i8: +define @test_vlseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vlseg2e8.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlseg2e8.v v4, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), 
v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv32i8( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv2i8(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv2i8(,, ptr, , i32, i32) -define @test_vlseg2_nxv2i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv2i8: +define @test_vlseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg2e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv2i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv2i8: +define @test_vlseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg2e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i8( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv2i8(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2i8(,,, ptr, , i32, i32) -define @test_vlseg3_nxv2i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv2i8: +define @test_vlseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv2i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv2i8: +define @test_vlseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg3e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i8( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv2i8(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i8(,,,, ptr, , i32, i32) -define @test_vlseg4_nxv2i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv2i8: +define @test_vlseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv2i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv2i8: +define @test_vlseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg4e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i8( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + 
%1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2i8(,,,,, ptr , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8(,,,,, ptr, , i32, i32) -define @test_vlseg5_nxv2i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg5_nxv2i8: +define @test_vlseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv2i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv2i8: +define @test_vlseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg5e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8( %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2i8(,,,,,, ptr , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8(,,,,,, ptr, , i32, i32) -define @test_vlseg6_nxv2i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg6_nxv2i8: +define @test_vlseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv2i8(ptr %base, i32 %vl, %mask) { -; 
CHECK-LABEL: test_vlseg6_mask_nxv2i8: +define @test_vlseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg6e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(,,,,,,, ptr , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8(,,,,,,, ptr, , i32, i32) -define @test_vlseg7_nxv2i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg7_nxv2i8: +define @test_vlseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv2i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv2i8: +define @test_vlseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg7e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) 
+ %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8_nxv2i8(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg8_nxv2i8: +define @test_vlseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv2i8(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv2i8: +define @test_vlseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg8e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv2i16(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv2i16(,, ptr, , i32, i32) -define @test_vlseg2_nxv2i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv2i16: +define @test_vlseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg2e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv2i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv2i16: +define @test_vlseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i16( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv2i16(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2i16(,,, ptr, , i32, i32) -define @test_vlseg3_nxv2i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv2i16: +define @test_vlseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv2i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv2i16: +define @test_vlseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i16( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv2i16(,,,, ptr , i32) -declare {,,,} 
@llvm.riscv.vlseg4.mask.nxv2i16(,,,, ptr, , i32, i32) -define @test_vlseg4_nxv2i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv2i16: +define @test_vlseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0) +; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv2i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv2i16: +define @test_vlseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg4e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i16( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2i16(,,,,, ptr , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16(,,,,, ptr, , i32, i32) -define @test_vlseg5_nxv2i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg5_nxv2i16: +define @test_vlseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv2i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv2i16: +define @test_vlseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg5e16.v 
v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16( %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2i16(,,,,,, ptr , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16(,,,,,, ptr, , i32, i32) -define @test_vlseg6_nxv2i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg6_nxv2i16: +define @test_vlseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv2i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv2i16: +define @test_vlseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(,,,,,,, ptr , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16(,,,,,,, ptr, , i32, i32) -define @test_vlseg7_nxv2i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg7_nxv2i16: +define 
@test_vlseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv2i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv2i16: +define @test_vlseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8_nxv2i16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg8_nxv2i16: +define @test_vlseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv2i16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv2i16: +define @test_vlseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: 
vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv4i32(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv4i32(,, ptr, , i32, i32) -define @test_vlseg2_nxv4i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv4i32: +define @test_vlseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg2e32.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv4i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv4i32: +define @test_vlseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg2e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i32( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv4i32(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4i32(,,, ptr, , i32, i32) -define @test_vlseg3_nxv4i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv4i32: +define @test_vlseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg3e32.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv4i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv4i32: +define @test_vlseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg3e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i32( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv4i32(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i32(,,,, ptr, , i32, i32) -define @test_vlseg4_nxv4i32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv4i32: +define @test_vlseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg4e32.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv4i32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv4i32: +define @test_vlseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg4e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32( undef, undef, undef, 
undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i32( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv16f16(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv16f16(,, ptr, , i32, i32) -define @test_vlseg2_nxv16f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv16f16: +define @test_vlseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vlseg2e16.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv16f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv16f16: +define @test_vlseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vlseg2e16.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16f16( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv4f64(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv4f64(,, ptr, , i32, i32) -define @test_vlseg2_nxv4f64(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv4f64: +define @test_vlseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vlseg2e64.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + 
%1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv4f64(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv4f64: +define @test_vlseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vlseg2e64.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f64( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1f64(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv1f64(,, ptr, , i32, i32) -define @test_vlseg2_nxv1f64(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv1f64: +define @test_vlseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg2e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg2e32.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv1f64(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv1f64: +define @test_vlseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg2e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f64( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv1f64(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1f64(,,, ptr, , i32, i32) 
-define @test_vlseg3_nxv1f64(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv1f64: +define @test_vlseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg3e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv1f64(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv1f64: +define @test_vlseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg3e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f64( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv1f64(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f64(,,,, ptr, , i32, i32) -define @test_vlseg4_nxv1f64(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv1f64: +define @test_vlseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg4e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv1f64(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv1f64: +define @test_vlseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg4e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 
-; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f64( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1f64(,,,,, ptr , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64(,,,,, ptr, , i32, i32) -define @test_vlseg5_nxv1f64(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg5_nxv1f64: +define @test_vlseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg5e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv1f64(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv1f64: +define @test_vlseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg5e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64( %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1f64(,,,,,, ptr , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64(,,,,,, ptr, , i32, i32) -define @test_vlseg6_nxv1f64(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg6_nxv1f64: +define @test_vlseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, 
ta, ma -; CHECK-NEXT: vlseg6e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv1f64(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv1f64: +define @test_vlseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg6e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(,,,,,,, ptr , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64(,,,,,,, ptr, , i32, i32) -define @test_vlseg7_nxv1f64(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg7_nxv1f64: +define @test_vlseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg7e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv1f64(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv1f64: +define @test_vlseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg7e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; 
CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8_nxv1f64(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg8_nxv1f64: +define @test_vlseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg8e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv1f64(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv1f64: +define @test_vlseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg8e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv2f32(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv2f32(,, ptr, , i32, i32) -define @test_vlseg2_nxv2f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv2f32: +define 
@test_vlseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg2e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg4e32.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv2f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv2f32: +define @test_vlseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg2e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f32( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv2f32(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2f32(,,, ptr, , i32, i32) -define @test_vlseg3_nxv2f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv2f32: +define @test_vlseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv2f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv2f32: +define @test_vlseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg3e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 
= tail call {,,} @llvm.riscv.vlseg3.nxv2f32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f32( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv2f32(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f32(,,,, ptr, , i32, i32) -define @test_vlseg4_nxv2f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv2f32: +define @test_vlseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0) +; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vlseg4_mask_nxv2f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv2f32: +define @test_vlseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg4e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f32( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2f32(,,,,, ptr , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32(,,,,, ptr, , i32, i32) -define @test_vlseg5_nxv2f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg5_nxv2f32: +define @test_vlseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv2f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv2f32: +define @test_vlseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg5e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32( %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2f32(,,,,,, ptr , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32(,,,,,, ptr, , i32, i32) -define @test_vlseg6_nxv2f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg6_nxv2f32: +define @test_vlseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vlseg6_mask_nxv2f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv2f32: +define @test_vlseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg6e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(,,,,,,, ptr , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32(,,,,,,, ptr, , i32, i32) -define @test_vlseg7_nxv2f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg7_nxv2f32: +define @test_vlseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv2f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv2f32: +define @test_vlseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg7e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8_nxv2f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg8_nxv2f32: +define @test_vlseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0) +; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vlseg8_mask_nxv2f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv2f32: +define @test_vlseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg8e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1f16(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv1f16(,, ptr, , i32, i32) -define @test_vlseg2_nxv1f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv1f16: +define @test_vlseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg2e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv1f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv1f16: +define @test_vlseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f16( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) 
+ %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv1f16(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1f16(,,, ptr, , i32, i32) -define @test_vlseg3_nxv1f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv1f16: +define @test_vlseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv1f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv1f16: +define @test_vlseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f16( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv1f16(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f16(,,,, ptr, , i32, i32) -define @test_vlseg4_nxv1f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv1f16: +define @test_vlseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg2e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv1f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv1f16: +define @test_vlseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, 
%mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vlseg4e16.v v7, (a0)
-; CHECK-NEXT:    vmv1r.v v8, v7
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vlseg4e16.v v7, (a0), v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vlseg2e64.v v7, (a0), v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.mask.nxv1f16(<vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1, i32 6)
+  %1 = call <vscale x 1 x double> @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0, i32 1)
+  ret <vscale x 1 x double> %1
 }
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32, i32)
-define <vscale x 1 x half> @test_vlseg5_nxv1f16(ptr %base, i32 %vl) {
-; CHECK-LABEL: test_vlseg5_nxv1f16:
+define <vscale x 2 x double> @test_vlseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vlseg5e16.v v7, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vlseg2e64.v v6, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) undef, ptr %base, i32 %vl, i32 6)
+  %1 = call <vscale x 2 x double> @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %0, i32 1)
+  ret <vscale x 2 x double> %1
 }
-define <vscale x 1 x half> @test_vlseg5_mask_nxv1f16(ptr %base, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vlseg5_mask_nxv1f16:
+define <vscale x 2 x double> @test_vlseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vlseg5e16.v v7, (a0)
-; CHECK-NEXT:    vmv1r.v v8, v7
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vlseg5e16.v v7, (a0), v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vlseg2e64.v v6, (a0), v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.mask.nxv1f16(<vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
+  %0 = tail call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) undef, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1, i32 6)
+  %1 = call <vscale x 2 x double> @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %0, i32 1)
+  ret <vscale x 2 x double> %1
 }
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32, i32)
-define <vscale x 1 x half> @test_vlseg6_nxv1f16(ptr %base, i32 %vl) {
-; CHECK-LABEL: test_vlseg6_nxv1f16:
+define <vscale x 4 x double> @test_vlseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vlseg6e16.v v7, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vlseg2e64.v v4, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, i32 %vl, i32 6)
+  %1 = call <vscale x 4 x double> @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %0, i32 1)
+  ret <vscale x 4 x double> %1
 }
-define <vscale x 1 x half> @test_vlseg6_mask_nxv1f16(ptr %base, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vlseg6_mask_nxv1f16:
+define <vscale x 4 x double> @test_vlseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vlseg6e16.v v7, (a0)
-; CHECK-NEXT:    vmv1r.v v8, v7
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vlseg6e16.v v7, (a0), v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vlseg2e64.v v4, (a0), v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.mask.nxv1f16(<vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
+  %0 = tail call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1, i32 6)
+  %1 = call <vscale x 4 x double> @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %0, i32 1)
+  ret <vscale x 4 x double> %1
 }
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32, i32)
-define <vscale x 1 x half> @test_vlseg7_nxv1f16(ptr %base, i32 %vl) {
-; CHECK-LABEL: test_vlseg7_nxv1f16:
+define <vscale x 1 x double> @test_vlseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vlseg7e16.v v7, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vlseg3e64.v v7, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) undef, ptr %base, i32 %vl, i32 6)
+  %1 = call <vscale x 1 x double> @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %0, i32 1)
+  ret <vscale x 1 x double> %1
 }
-define <vscale x 1 x half> @test_vlseg7_mask_nxv1f16(ptr %base, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vlseg7_mask_nxv1f16:
+define <vscale x 1 x double> @test_vlseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t:
 ; CHECK:       # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8_nxv1f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg8_nxv1f16: +define @test_vlseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg3e64.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv1f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv1f16: +define @test_vlseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1f32(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv1f32(,, ptr, , i32, i32) -define @test_vlseg2_nxv1f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv1f32: +define @test_vlseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg2e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg4e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv1f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv1f32: +define @test_vlseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg2e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f32( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv1f32(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1f32(,,, ptr, , i32, i32) -define @test_vlseg3_nxv1f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv1f32: +define @test_vlseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg4e64.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv1f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv1f32: +define @test_vlseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg3e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f32( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv1f32(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f32(,,,, ptr, , i32, i32) -define @test_vlseg4_nxv1f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv1f32: +define @test_vlseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg5e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv1f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv1f32: +define @test_vlseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg4e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f32( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1f32(,,,,, ptr , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32(,,,,, ptr, , i32, i32) -define @test_vlseg5_nxv1f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg5_nxv1f32: +define @test_vlseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg6e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv1f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv1f32: +define @test_vlseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg5e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32( %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1f32(,,,,,, ptr , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32(,,,,,, ptr, , i32, i32) -define @test_vlseg6_nxv1f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg6_nxv1f32: +define @test_vlseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg6e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg7e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv1f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv1f32: +define @test_vlseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg6e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: 
vlseg6e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(,,,,,,, ptr , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32(,,,,,,, ptr, , i32, i32) -define @test_vlseg7_nxv1f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg7_nxv1f32: +define @test_vlseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg7e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg8e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv1f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv1f32: +define @test_vlseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg7e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8_nxv1f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg8_nxv1f32: +define @test_vlseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: 
test_vlseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv1f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv1f32: +define @test_vlseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg8e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv8f16(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv8f16(,, ptr, , i32, i32) -define @test_vlseg2_nxv8f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv8f16: +define @test_vlseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg2e16.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv8f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv8f16: +define @test_vlseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: 
vlseg2e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f16( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv8f16(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv8f16(,,, ptr, , i32, i32) -define @test_vlseg3_nxv8f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv8f16: +define @test_vlseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg3e16.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv8f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv8f16: +define @test_vlseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg3e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8f16( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv8f16(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv8f16(,,,, ptr, , i32, i32) -define @test_vlseg4_nxv8f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv8f16: +define @test_vlseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg4e16.v v6, (a0) +; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vlseg4.nxv8f16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv8f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv8f16: +define @test_vlseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg4e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8f16( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv8f32(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv8f32(,, ptr, , i32, i32) -define @test_vlseg2_nxv8f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv8f32: +define @test_vlseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vlseg2e32.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv8f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv8f32: +define @test_vlseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vlseg2e32.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f32( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv2f64(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv2f64(,, ptr, , i32, i32) -define @test_vlseg2_nxv2f64(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv2f64: +define @test_vlseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg2e64.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv2f64(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv2f64: +define @test_vlseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg2e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f64( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv2f64(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2f64(,,, ptr, , i32, i32) -define @test_vlseg3_nxv2f64(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv2f64: +define @test_vlseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg3e64.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv2f64(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv2f64: 
+define @test_vlseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg3e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f64( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv2f64(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f64(,,,, ptr, , i32, i32) -define @test_vlseg4_nxv2f64(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv2f64: +define @test_vlseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg4e64.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv2f64(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv2f64: +define @test_vlseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg4e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f64( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv4f16(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv4f16(,, ptr, , i32, i32) -define @test_vlseg2_nxv4f16(ptr %base, i32 %vl) { -; CHECK-LABEL: 
test_vlseg2_nxv4f16: +define @test_vlseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg2e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv4f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv4f16: +define @test_vlseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f16( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv4f16(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4f16(,,, ptr, , i32, i32) -define @test_vlseg3_nxv4f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv4f16: +define @test_vlseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv4f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv4f16: +define @test_vlseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, 
(a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f16( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv4f16(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f16(,,,, ptr, , i32, i32) -define @test_vlseg4_nxv4f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv4f16: +define @test_vlseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv4f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv4f16: +define @test_vlseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg4e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f16( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv4f16(,,,,, ptr , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16(,,,,, ptr, , i32, i32) -define @test_vlseg5_nxv4f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg5_nxv4f16: +define @test_vlseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0) +; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv4f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv4f16: +define @test_vlseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg5e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16( %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv4f16(,,,,,, ptr , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16(,,,,,, ptr, , i32, i32) -define @test_vlseg6_nxv4f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg6_nxv4f16: +define @test_vlseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv4f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv4f16: +define @test_vlseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) 
- %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(,,,,,,, ptr , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16(,,,,,,, ptr, , i32, i32) -define @test_vlseg7_nxv4f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg7_nxv4f16: +define @test_vlseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv4f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv4f16: +define @test_vlseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8_nxv4f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg8_nxv4f16: +define @test_vlseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) 
- %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv4f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv4f16: +define @test_vlseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv2f16(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv2f16(,, ptr, , i32, i32) -define @test_vlseg2_nxv2f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv2f16: +define @test_vlseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg2e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv2f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv2f16: +define @test_vlseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f16( %1, %1, ptr %base, %mask, i32 %vl, i32 
1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv2f16(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2f16(,,, ptr, , i32, i32) -define @test_vlseg3_nxv2f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv2f16: +define @test_vlseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv2f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv2f16: +define @test_vlseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f16( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv2f16(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f16(,,,, ptr, , i32, i32) -define @test_vlseg4_nxv2f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv2f16: +define @test_vlseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0) +; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret 
%1 } -define @test_vlseg4_mask_nxv2f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv2f16: +define @test_vlseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg4e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f16( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2f16(,,,,, ptr , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16(,,,,, ptr, , i32, i32) -define @test_vlseg5_nxv2f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg5_nxv2f16: +define @test_vlseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv2f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv2f16: +define @test_vlseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg5e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16( %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, 
i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2f16(,,,,,, ptr , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16(,,,,,, ptr, , i32, i32) -define @test_vlseg6_nxv2f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg6_nxv2f16: +define @test_vlseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv2f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv2f16: +define @test_vlseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(,,,,,,, ptr , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16(,,,,,,, ptr, , i32, i32) -define @test_vlseg7_nxv2f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg7_nxv2f16: +define @test_vlseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv2f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv2f16: +define 
@test_vlseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(,,,,,,,, ptr , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8_nxv2f16(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg8_nxv2f16: +define @test_vlseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv2f16(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv2f16: +define @test_vlseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) 
undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv4f32(,, ptr , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv4f32(,, ptr, , i32, i32) -define @test_vlseg2_nxv4f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg2_nxv4f32: +define @test_vlseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg2e32.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv4f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv4f32: +define @test_vlseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg2e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f32( %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv4f32(,,, ptr , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4f32(,,, ptr, , i32, i32) -define @test_vlseg3_nxv4f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg3_nxv4f32: +define @test_vlseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg3e32.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv4f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv4f32: +define @test_vlseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, %mask) { +; 
CHECK-LABEL: test_vlseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg3e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f32( %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv4f32(,,,, ptr , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f32(,,,, ptr, , i32, i32) -define @test_vlseg4_nxv4f32(ptr %base, i32 %vl) { -; CHECK-LABEL: test_vlseg4_nxv4f32: +define @test_vlseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl) { +; CHECK-LABEL: test_vlseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg4e32.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv4f32(ptr %base, i32 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv4f32: +define @test_vlseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg4e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f32( %1, %1, %1, %1, ptr %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } + diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll index d012dc5200602..d6cbf362e7ece 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll @@ -1,4700 +1,4247 @@ ; NOTE: Assertions have been 
autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh \
+; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr , i64)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i1>, i64, i64, i64)
-define <vscale x 16 x i16> @test_vlseg2_nxv16i16(ptr %base, i64 %vl) {
-; CHECK-LABEL: test_vlseg2_nxv16i16:
+define <vscale x 1 x i8> @test_vlseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vlseg2e16.v v4, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vlseg2e8.v v7, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) undef, ptr %base, i64 %vl, i64 3)
+  %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %0, i32 1)
+  ret <vscale x 1 x i8> %1
 }
-define <vscale x 16 x i16> @test_vlseg2_mask_nxv16i16(ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vlseg2_mask_nxv16i16:
+define <vscale x 1 x i8> @test_vlseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vlseg2e16.v v4, (a0)
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    vlseg2e16.v v4, (a0), v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vlseg2e8.v v7, (a0), v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.mask.nxv16i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
+  %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) undef, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 3)
+  %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %0, i32 1)
+  ret <vscale x 1 x i8> %1
 }
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr , i64)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i1>, i64, i64, i64)
-define <vscale x 4 x i32> @test_vlseg2_nxv4i32(ptr %base, i64 %vl) {
-; CHECK-LABEL: test_vlseg2_nxv4i32:
+define <vscale x 2 x i8> @test_vlseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vlseg2e32.v v6, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vlseg2e8.v v7, (a0)
 ; CHECK-NEXT:    ret
entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv4i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv4i32: +define @test_vlseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg2e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i32( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv4i32(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4i32(,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg3_nxv4i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv4i32: +define @test_vlseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg3e32.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv4i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv4i32: +define @test_vlseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg3e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32( undef, undef, undef, ptr 
%base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i32( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv4i32(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i32(,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg4_nxv4i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv4i32: +define @test_vlseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg4e32.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv4i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv4i32: +define @test_vlseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg4e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i32( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv16i8(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv16i8(,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg2_nxv16i8(ptr %base, i64 %vl) { -; 
CHECK-LABEL: test_vlseg2_nxv16i8: +define @test_vlseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg2e8.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vlseg2_mask_nxv16i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv16i8: +define @test_vlseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vlseg2e8.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg2e8.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i8( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv16i8(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv16i8(,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg3_nxv16i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv16i8: +define @test_vlseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vlseg3e8.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vlseg2e8.v v4, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv16i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv16i8: +define @test_vlseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vlseg3e8.v v6, (a0) -; CHECK-NEXT: vmv2r.v 
v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vlseg2e8.v v4, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv16i8( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv16i8(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv16i8(,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg4_nxv16i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv16i8: +define @test_vlseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vlseg4e8.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv16i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv16i8: +define @test_vlseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vlseg4e8.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv16i8( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1i64(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv1i64(,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vlseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg2_nxv1i64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv1i64: +define @test_vlseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg2e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv1i64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv1i64: +define @test_vlseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg2e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i64( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv1i64(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1i64(,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg3_nxv1i64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv1i64: +define @test_vlseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg3e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i64( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv1i64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv1i64: +define @test_vlseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg3e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i64( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i64( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv1i64(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i64(,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg4_nxv1i64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv1i64: +define @test_vlseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg4e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i64( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv1i64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv1i64: +define @test_vlseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg4e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i64( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i64( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1i64(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i64(,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg5_nxv1i64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg5_nxv1i64: +define @test_vlseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg5e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vlseg3e8.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i64( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv1i64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv1i64: +define @test_vlseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg5e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i64( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i64( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1i64(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i64(,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg6_nxv1i64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv1i64: +define @test_vlseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i64 %vl) { +; 
CHECK-LABEL: test_vlseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg6e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv1i64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv1i64: +define @test_vlseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg6e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i64( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i64(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i64(,,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg7_nxv1i64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv1i64: +define @test_vlseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg7e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv1i64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv1i64: +define 
@test_vlseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg7e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i64(,,,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg8_nxv1i64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv1i64: +define @test_vlseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg8e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv1i64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv1i64: +define @test_vlseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg8e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 
0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1i32(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv1i32(,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg2_nxv1i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv1i32: +define @test_vlseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg2e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv1i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv1i32: +define @test_vlseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg2e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i32( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv1i32(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1i32(,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg3_nxv1i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv1i32: +define @test_vlseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: 
test_vlseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vlseg4e8.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv1i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv1i32: +define @test_vlseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg3e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i32( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv1i32(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i32(,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) -define @test_vlseg4_nxv1i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv1i32: +define @test_vlseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv1i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv1i32: +define @test_vlseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e32, mf2, ta, mu -; CHECK-NEXT: vlseg4e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i32( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1i32(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32(,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) -define @test_vlseg5_nxv1i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg5_nxv1i32: +define @test_vlseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv1i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv1i32: +define @test_vlseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg5e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,} 
@llvm.riscv.vlseg6.nxv1i32(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32(,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) -define @test_vlseg6_nxv1i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv1i32: +define @test_vlseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg6e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv1i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv1i32: +define @test_vlseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg6e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32(,,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) -define @test_vlseg7_nxv1i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv1i32: +define @test_vlseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg7e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: ret entry: 
- %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv1i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv1i32: +define @test_vlseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg7e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32(,,,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg8_nxv1i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv1i32: +define @test_vlseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv1i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv1i32: +define @test_vlseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, mu -; CHECK-NEXT: vlseg8e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv8i16(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv8i16(,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg2_nxv8i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv8i16: +define @test_vlseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg2e16.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv8i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv8i16: +define @test_vlseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg2e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i16( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} 
@llvm.riscv.vlseg3.nxv8i16(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv8i16(,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg3_nxv8i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv8i16: +define @test_vlseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg3e16.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv8i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv8i16: +define @test_vlseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg3e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i16( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv8i16(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i16(,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg4_nxv8i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv8i16: +define @test_vlseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg4e16.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 
6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv8i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv8i16: +define @test_vlseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg4e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i16( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv4i8(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv4i8(,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg2_nxv4i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv4i8: +define @test_vlseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg2e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv4i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv4i8: +define @test_vlseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg2e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i8( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = 
extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv4i8(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4i8(,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg3_nxv4i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv4i8: +define @test_vlseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv4i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv4i8: +define @test_vlseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg3e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i8( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv4i8(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i8(,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg4_nxv4i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv4i8: +define @test_vlseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0) +; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vlseg4_mask_nxv4i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv4i8: +define @test_vlseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg4e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i8( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv4i8(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8(,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg5_nxv4i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg5_nxv4i8: +define @test_vlseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv4i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv4i8: +define @test_vlseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg5e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: 
vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv4i8(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8(,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg6_nxv4i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv4i8: +define @test_vlseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv4i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv4i8: +define @test_vlseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg6e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(,,,,,,, ptr , i64) -declare 
{,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8(,,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg7_nxv4i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv4i8: +define @test_vlseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv4i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv4i8: +define @test_vlseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg7e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8(,,,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg8_nxv4i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv4i8: +define @test_vlseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8( undef, undef , undef , undef, undef , 
undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -define @test_vlseg8_mask_nxv4i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv4i8: +define @test_vlseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) + +define @test_vlseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg8e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1i16(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv1i16(,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg2_nxv1i16(ptr %base, 
i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv1i16: +define @test_vlseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vlseg2_mask_nxv1i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv1i16: +define @test_vlseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) + +define @test_vlseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) + +define @test_vlseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, 
ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i16( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv1i16(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1i16(,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) + +define @test_vlseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg2e16.v v6, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} -define @test_vlseg3_nxv1i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv1i16: +define @test_vlseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) + +define @test_vlseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16.v v4, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) + +define @test_vlseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vlseg3_mask_nxv1i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv1i16: +define @test_vlseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) + +define @test_vlseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) + +define @test_vlseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i16( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) + +define @test_vlseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16.v v6, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv1i16(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i16(,,,, ptr, , i64, i64) +define @test_vlseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} -define @test_vlseg4_nxv1i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv1i16: +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i1>, i64, i64, i64)
+
+define <vscale x 1 x i16> @test_vlseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vlseg4e16.v v7, (a0)
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>} %0, 1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, i64 %vl, i64 4)
+  %1 = call <vscale x 1 x i16> @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
   ret <vscale x 1 x i16> %1
 }

-define <vscale x 1 x i16> @test_vlseg4_mask_nxv1i16(ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vlseg4_mask_nxv1i16:
+define <vscale x 1 x i16> @test_vlseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 1 x i16> @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x i16> %1
+}
+
+declare target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i1>, i64, i64, i64)
+
+define <vscale x 2 x i16> @test_vlseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: vmv1r.v v8, v7
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) undef, ptr %base, i64 %vl, i64 4)
+  %1 = call <vscale x 2 x i16> @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %0, i32 1)
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) undef, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 2 x i16> @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %0, i32 1)
+  ret <vscale x 2 x i16> %1
+}
+
+declare target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i1>, i64, i64, i64)
+
+define <vscale x 4 x i16> @test_vlseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vlseg4e16.v v7, (a0)
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) undef, ptr %base, i64 %vl, i64 4)
+  %1 = call <vscale x 4 x i16> @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %0, i32 1)
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>} @llvm.riscv.vlseg4.mask.nxv1i16(<vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) undef, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 4 x i16> @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %0, i32 1)
+  ret <vscale x 4 x i16> %1
+}
+
+declare target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i1>, i64, i64, i64)
+
+define <vscale x 8 x i16> @test_vlseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vlseg4e16.v v6, (a0)
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) undef, ptr %base, i64 %vl, i64 4)
+  %1 = call <vscale x 8 x i16> @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %0, i32 1)
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) undef, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 8 x i16> @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %0, i32 1)
+  ret <vscale x 8 x i16> %1
 }

-declare {<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, ptr , i64)
-declare {<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>} @llvm.riscv.vlseg5.mask.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i1>, i64, i64, i64)

-define <vscale x 1 x i16> @test_vlseg5_nxv1i16(ptr %base, i64 %vl) {
-; CHECK-LABEL: test_vlseg5_nxv1i16:
+define <vscale x 1 x i16> @test_vlseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vlseg5e16.v v7, (a0)
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>} %0, 1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) undef, ptr %base, i64 %vl, i64 4)
+  %1 = call <vscale x 1 x i16> @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %0, i32 1)
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) undef, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 1 x i16> @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %0, i32 1)
   ret <vscale x 1 x i16> %1
 }

-define <vscale x 1 x i16> @test_vlseg5_mask_nxv1i16(ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vlseg5_mask_nxv1i16:
+declare target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i1>, i64, i64, i64)
+
+define <vscale x 2 x i16> @test_vlseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vlseg5e16.v v7, (a0)
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) undef, ptr %base, i64 %vl, i64 4)
+  %1 = call <vscale x 2 x i16> @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %0, i32 1)
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) undef, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 2 x i16> @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %0, i32 1)
+  ret <vscale x 2 x i16> %1
+}
+
+declare target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i1>, i64, i64, i64)
+
+define <vscale x 4 x i16> @test_vlseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: vmv1r.v v8, v7
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) undef, ptr %base, i64 %vl, i64 4)
+  %1 = call <vscale x 4 x i16> @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %0, i32 1)
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1i16(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16(,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg6_nxv1i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv1i16: +define @test_vlseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vlseg6_mask_nxv1i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv1i16: +define @test_vlseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) + +define @test_vlseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define 
@test_vlseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16(,,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg7_nxv1i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv1i16: +define @test_vlseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vlseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) + +define @test_vlseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vlseg7_mask_nxv1i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vlseg7_mask_nxv1i16: +define @test_vlseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) + +define @test_vlseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) + +define @test_vlseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} 
@llvm.riscv.vlseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) + +define @test_vlseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) + +define @test_vlseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) + +define @test_vlseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: 
test_vlseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) + +define @test_vlseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) + +define @test_vlseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32.v 
v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) + +define @test_vlseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg2e32.v v6, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) + +define @test_vlseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vlseg2e32.v v4, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) + +define @test_vlseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, 
mf2, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0) +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16(,,,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg8_nxv1i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv1i16: +define @test_vlseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv1i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv1i16: +define @test_vlseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 
= call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv2i32(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv2i32(,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg2_nxv2i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv2i32: +define @test_vlseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg2e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv2i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv2i32: +define @test_vlseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg2e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i32( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv2i32(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2i32(,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg3_nxv2i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv2i32: +define @test_vlseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv2i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv2i32: +define @test_vlseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg3e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i32( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv2i32(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i32(,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg4_nxv2i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv2i32: +define @test_vlseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) ret %1 } -define @test_vlseg4_mask_nxv2i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv2i32: +define @test_vlseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg4e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i32( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2i32(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32(,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg5_nxv2i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg5_nxv2i32: +define @test_vlseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg4e32.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv2i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv2i32: +define @test_vlseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg5e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2i32(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32(,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) -define @test_vlseg6_nxv2i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv2i32: +define @test_vlseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg6e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv2i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv2i32: +define @test_vlseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg6e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32(,,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) -define @test_vlseg7_nxv2i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv2i32: +define @test_vlseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg7e32.v v7, (a0) +; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vlseg7_mask_nxv2i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv2i32: +define @test_vlseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg7e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32( 
undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32(,,,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg8_nxv2i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv2i32: +define @test_vlseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv2i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv2i32: +define @test_vlseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg8e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv8i8(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv8i8(,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define 
@test_vlseg2_nxv8i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv8i8: +define @test_vlseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg2e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv8i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv8i8: +define @test_vlseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg2e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i8( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv8i8(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv8i8(,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg3_nxv8i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv8i8: +define @test_vlseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv8i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv8i8: +define @test_vlseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg3e8.v v7, (a0) -; 
CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i8( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv8i8(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i8(,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg4_nxv8i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv8i8: +define @test_vlseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv8i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv8i8: +define @test_vlseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg4e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i8( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv8i8(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8(,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg5_nxv8i8(ptr %base, i64 %vl) { -; 
CHECK-LABEL: test_vlseg5_nxv8i8: +define @test_vlseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv8i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv8i8: +define @test_vlseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg5e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv8i8(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8(,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg6_nxv8i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv8i8: +define @test_vlseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv8i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv8i8: +define @test_vlseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg6e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8(,,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg7_nxv8i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv8i8: +define @test_vlseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg2e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv8i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv8i8: +define @test_vlseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg7e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, 
i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8(,,,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg8_nxv8i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv8i8: +define @test_vlseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg2e64.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv8i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv8i8: +define @test_vlseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg8e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv4i64(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv4i64(,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg2_nxv4i64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv4i64: +define @test_vlseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vlseg2e64.v v4, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue 
{,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vlseg2_mask_nxv4i64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv4i64: +define @test_vlseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vlseg2e64.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i64( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv4i16(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv4i16(,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg2_nxv4i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv4i16: +define @test_vlseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg2e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg3e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv4i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv4i16: +define @test_vlseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i16( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr 
%base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv4i16(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4i16(,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg3_nxv4i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv4i16: +define @test_vlseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg3e64.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv4i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv4i16: +define @test_vlseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i16( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv4i16(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i16(,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg4_nxv4i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv4i16: +define @test_vlseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg4e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", 
, 4) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv4i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv4i16: +define @test_vlseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg4e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i16( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv4i16(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16(,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg5_nxv4i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg5_nxv4i16: +define @test_vlseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg4e64.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv4i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv4i16: +define @test_vlseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg5e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} 
%2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv4i16(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16(,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) -define @test_vlseg6_nxv4i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv4i16: +define @test_vlseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg5e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv4i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv4i16: +define @test_vlseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16(,,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg7_nxv4i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv4i16: +define @test_vlseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0) +; CHECK-NEXT: vsetvli 
zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg6e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv4i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv4i16: +define @test_vlseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16(,,,,,,,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg8_nxv4i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv4i16: +define @test_vlseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg7e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv4i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv4i16: +define @test_vlseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg8e16.v v7, (a0) -; 
CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1i8(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv1i8(,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg2_nxv1i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv1i8: +define @test_vlseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg2e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg8e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv1i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv1i8: +define @test_vlseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg2e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i8( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv1i8(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1i8(,,, ptr, , i64, i64) -define @test_vlseg3_nxv1i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv1i8: +define 
@test_vlseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv1i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv1i8: +define @test_vlseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg3e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i8( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv1i8(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i8(,,,, ptr, , i64, i64) -define @test_vlseg4_nxv1i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv1i8: +define @test_vlseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv1i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv1i8: +define @test_vlseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg4e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, 
ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i8( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1i8(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8(,,,,, ptr, , i64, i64) -define @test_vlseg5_nxv1i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg5_nxv1i8: +define @test_vlseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv1i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv1i8: +define @test_vlseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg5e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1i8(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8(,,,,,, ptr, , i64, i64) -define @test_vlseg6_nxv1i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv1i8: +define @test_vlseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg2e16.v v6, (a0) ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv1i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv1i8: +define @test_vlseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg6e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8(,,,,,,, ptr, , i64, i64) -define @test_vlseg7_nxv1i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv1i8: +define @test_vlseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv1i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv1i8: +define @test_vlseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg7e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: 
vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8_nxv1i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv1i8: +define @test_vlseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv1i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv1i8: +define @test_vlseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg8e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv2i8(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv2i8(,, ptr, , i64, i64) -define @test_vlseg2_nxv2i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv2i8: +define @test_vlseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg2e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv2i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv2i8: +define @test_vlseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg2e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i8( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv2i8(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2i8(,,, ptr, , i64, i64) -define @test_vlseg3_nxv2i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv2i8: +define @test_vlseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv2i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv2i8: +define @test_vlseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg3e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i8( %1, 
%1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv2i8(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i8(,,,, ptr, , i64, i64) -define @test_vlseg4_nxv2i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv2i8: +define @test_vlseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv2i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv2i8: +define @test_vlseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg4e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i8( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2i8(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8(,,,,, ptr, , i64, i64) -define @test_vlseg5_nxv2i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg5_nxv2i8: +define @test_vlseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 
%vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv2i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv2i8: +define @test_vlseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg5e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2i8(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8(,,,,,, ptr, , i64, i64) -define @test_vlseg6_nxv2i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv2i8: +define @test_vlseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv2i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv2i8: +define @test_vlseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg6e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8(,,,,,,, ptr, , i64, i64) -define @test_vlseg7_nxv2i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv2i8: +define @test_vlseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv2i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv2i8: +define @test_vlseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg7e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8_nxv2i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv2i8: +define @test_vlseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv2i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv2i8: +define @test_vlseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg8e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv8i32(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv8i32(,, ptr, , i64, i64) -define @test_vlseg2_nxv8i32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv8i32: +define @test_vlseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vlseg2e32.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv8i32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv8i32: +define @test_vlseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vlseg2e32.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i32( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv32i8(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv32i8(,, ptr, , i64, i64) -define @test_vlseg2_nxv32i8(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv32i8: +define @test_vlseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vlseg2e8.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv32i8(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv32i8: +define @test_vlseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vlseg2e8.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlseg2e8.v v4, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv32i8( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv2i16(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv2i16(,, ptr, , i64, i64) -define @test_vlseg2_nxv2i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv2i16: +define @test_vlseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg2e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv2i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv2i16: +define 
@test_vlseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i16( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv2i16(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2i16(,,, ptr, , i64, i64) -define @test_vlseg3_nxv2i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv2i16: +define @test_vlseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv2i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv2i16: +define @test_vlseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i16( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv2i16(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i16(,,,, ptr, , i64, i64) -define @test_vlseg4_nxv2i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv2i16: +define @test_vlseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: 
test_vlseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0) +; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv2i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv2i16: +define @test_vlseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg4e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i16( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2i16(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16(,,,,, ptr, , i64, i64) -define @test_vlseg5_nxv2i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg5_nxv2i16: +define @test_vlseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv2i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv2i16: +define @test_vlseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg5e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: 
vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2i16(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16(,,,,,, ptr, , i64, i64) -define @test_vlseg6_nxv2i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv2i16: +define @test_vlseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv2i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv2i16: +define @test_vlseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16(,,,,,,, ptr, , i64, i64) -define @test_vlseg7_nxv2i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv2i16: +define @test_vlseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: 
ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv2i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv2i16: +define @test_vlseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8_nxv2i16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv2i16: +define @test_vlseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv2i16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv2i16: +define @test_vlseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli 
zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv2i64(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv2i64(,, ptr, , i64, i64) -define @test_vlseg2_nxv2i64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv2i64: +define @test_vlseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg2e64.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv2i64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv2i64: +define @test_vlseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg2e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i64( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv2i64(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2i64(,,, ptr, , i64, i64) -define @test_vlseg3_nxv2i64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv2i64: +define @test_vlseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg3e64.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i64( undef, undef, 
undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv2i64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv2i64: +define @test_vlseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg3e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i64( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i64( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv2i64(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i64(,,,, ptr, , i64, i64) -define @test_vlseg4_nxv2i64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv2i64: +define @test_vlseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg4e64.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i64( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv2i64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv2i64: +define @test_vlseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg4e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i64( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i64( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv16f16(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv16f16(,, ptr, , i64, i64) -define @test_vlseg2_nxv16f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv16f16: +define @test_vlseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vlseg2e16.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv16f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv16f16: +define @test_vlseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vlseg2e16.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16f16( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv4f64(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv4f64(,, ptr, , i64, i64) -define @test_vlseg2_nxv4f64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv4f64: +define @test_vlseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vlseg2e64.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv4f64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv4f64: +define 
@test_vlseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vlseg2e64.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f64( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1f64(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv1f64(,, ptr, , i64, i64) -define @test_vlseg2_nxv1f64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv1f64: +define @test_vlseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg2e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg2e32.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv1f64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv1f64: +define @test_vlseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg2e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f64( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv1f64(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1f64(,,, ptr, , i64, i64) -define @test_vlseg3_nxv1f64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv1f64: +define @test_vlseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg3e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv1f64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv1f64: +define @test_vlseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg3e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f64( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv1f64(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f64(,,,, ptr, , i64, i64) -define @test_vlseg4_nxv1f64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv1f64: +define @test_vlseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg4e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv1f64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv1f64: +define @test_vlseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg4e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64( undef, 
undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f64( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1f64(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64(,,,,, ptr, , i64, i64) -define @test_vlseg5_nxv1f64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg5_nxv1f64: +define @test_vlseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg5e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv1f64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv1f64: +define @test_vlseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg5e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1f64(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64(,,,,,, ptr, , i64, i64) -define @test_vlseg6_nxv1f64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv1f64: +define @test_vlseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg6e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64( undef, undef, undef, undef, undef, undef, 
ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv1f64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv1f64: +define @test_vlseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg6e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64(,,,,,,, ptr, , i64, i64) -define @test_vlseg7_nxv1f64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv1f64: +define @test_vlseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg7e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv1f64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv1f64: +define @test_vlseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg7e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64( undef, undef, 
undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8_nxv1f64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv1f64: +define @test_vlseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg8e64.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv1f64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv1f64: +define @test_vlseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg8e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv2f32(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv2f32(,, ptr, , i64, i64) -define @test_vlseg2_nxv2f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv2f32: +define @test_vlseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg2e32.v v7, (a0) +; 
CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg4e32.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv2f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv2f32: +define @test_vlseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg2e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f32( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv2f32(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2f32(,,, ptr, , i64, i64) -define @test_vlseg3_nxv2f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv2f32: +define @test_vlseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv2f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv2f32: +define @test_vlseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg3e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f32( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret 
%3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv2f32(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f32(,,,, ptr, , i64, i64) -define @test_vlseg4_nxv2f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv2f32: +define @test_vlseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0) +; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vlseg4_mask_nxv2f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv2f32: +define @test_vlseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg4e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f32( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2f32(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32(,,,,, ptr, , i64, i64) -define @test_vlseg5_nxv2f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg5_nxv2f32: +define @test_vlseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 
1) + ret %1 } -define @test_vlseg5_mask_nxv2f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv2f32: +define @test_vlseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg5e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2f32(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32(,,,,,, ptr, , i64, i64) -define @test_vlseg6_nxv2f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv2f32: +define @test_vlseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vlseg6_mask_nxv2f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv2f32: +define @test_vlseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg6e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} 
@llvm.riscv.vlseg7.nxv2f32(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32(,,,,,,, ptr, , i64, i64) -define @test_vlseg7_nxv2f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv2f32: +define @test_vlseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv2f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv2f32: +define @test_vlseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg7e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8_nxv2f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv2f32: +define @test_vlseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0) +; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vlseg8_mask_nxv2f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv2f32: +define @test_vlseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, 
%mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg8e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1f16(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv1f16(,, ptr, , i64, i64) -define @test_vlseg2_nxv1f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv1f16: +define @test_vlseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg2e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv1f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv1f16: +define @test_vlseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f16( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv1f16(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1f16(,,, ptr, , i64, i64) -define @test_vlseg3_nxv1f16(ptr %base, i64 %vl) { 
-; CHECK-LABEL: test_vlseg3_nxv1f16: +define @test_vlseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv1f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv1f16: +define @test_vlseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f16( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv1f16(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f16(,,,, ptr, , i64, i64) -define @test_vlseg4_nxv1f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv1f16: +define @test_vlseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg2e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv1f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv1f16: +define @test_vlseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg4e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: 
vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f16( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1f16(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16(,,,,, ptr, , i64, i64) -define @test_vlseg5_nxv1f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg5_nxv1f16: +define @test_vlseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg2e64.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv1f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv1f16: +define @test_vlseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg5e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1f16(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16(,,,,,, ptr, , i64, i64) -define @test_vlseg6_nxv1f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv1f16: +define @test_vlseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg6e16.v 
v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vlseg2e64.v v4, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv1f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv1f16: +define @test_vlseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16(,,,,,,, ptr, , i64, i64) -define @test_vlseg7_nxv1f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv1f16: +define @test_vlseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg3e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv1f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv1f16: +define @test_vlseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; 
CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8_nxv1f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv1f16: +define @test_vlseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg3e64.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv1f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv1f16: +define @test_vlseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1f32(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv1f32(,, ptr, , i64, i64) -define @test_vlseg2_nxv1f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv1f32: +define 
@test_vlseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg2e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg4e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv1f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv1f32: +define @test_vlseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg2e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f32( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv1f32(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1f32(,,, ptr, , i64, i64) -define @test_vlseg3_nxv1f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv1f32: +define @test_vlseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg4e64.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv1f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv1f32: +define @test_vlseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg3e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 
= tail call {,,} @llvm.riscv.vlseg3.nxv1f32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f32( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv1f32(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f32(,,,, ptr, , i64, i64) -define @test_vlseg4_nxv1f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv1f32: +define @test_vlseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg5e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv1f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv1f32: +define @test_vlseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg4e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f32( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1f32(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32(,,,,, ptr, , i64, i64) -define @test_vlseg5_nxv1f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg5_nxv1f32: +define @test_vlseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg6e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = 
extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv1f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv1f32: +define @test_vlseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg5e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1f32(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32(,,,,,, ptr, , i64, i64) -define @test_vlseg6_nxv1f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv1f32: +define @test_vlseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg6e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg7e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv1f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv1f32: +define @test_vlseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg6e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} 
@llvm.riscv.vlseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32(,,,,,,, ptr, , i64, i64) -define @test_vlseg7_nxv1f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv1f32: +define @test_vlseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg7e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg8e64.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv1f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv1f32: +define @test_vlseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg7e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8_nxv1f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv1f32: +define @test_vlseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vlseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv1f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv1f32: +define @test_vlseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg8e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv8f16(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv8f16(,, ptr, , i64, i64) -define @test_vlseg2_nxv8f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv8f16: +define @test_vlseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg2e16.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv8f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv8f16: +define @test_vlseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg2e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16( undef, undef, ptr %base, i64 %vl) - %1 = 
extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f16( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv8f16(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv8f16(,,, ptr, , i64, i64) -define @test_vlseg3_nxv8f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv8f16: +define @test_vlseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg3e16.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv8f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv8f16: +define @test_vlseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg3e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8f16( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv8f16(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv8f16(,,,, ptr, , i64, i64) -define @test_vlseg4_nxv8f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv8f16: +define @test_vlseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg4e16.v v6, (a0) +; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv8f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv8f16: +define @test_vlseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg4e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8f16( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv8f32(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv8f32(,, ptr, , i64, i64) -define @test_vlseg2_nxv8f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv8f32: +define @test_vlseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vlseg2e32.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv8f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv8f32: +define @test_vlseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vlseg2e32.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f32( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} 
@llvm.riscv.vlseg2.nxv2f64(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv2f64(,, ptr, , i64, i64) -define @test_vlseg2_nxv2f64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv2f64: +define @test_vlseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg2e64.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv2f64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv2f64: +define @test_vlseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg2e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f64( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv2f64(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2f64(,,, ptr, , i64, i64) -define @test_vlseg3_nxv2f64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv2f64: +define @test_vlseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg3e64.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv2f64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv2f64: +define @test_vlseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg3e64.v v6, (a0) -; CHECK-NEXT: 
vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f64( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv2f64(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f64(,,,, ptr, , i64, i64) -define @test_vlseg4_nxv2f64(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv2f64: +define @test_vlseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg4e64.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv2f64(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv2f64: +define @test_vlseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg4e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f64( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv4f16(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv4f16(,, ptr, , i64, i64) -define @test_vlseg2_nxv4f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv4f16: +define @test_vlseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg2e16.v v7, (a0) +; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv4f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv4f16: +define @test_vlseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f16( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv4f16(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4f16(,,, ptr, , i64, i64) -define @test_vlseg3_nxv4f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv4f16: +define @test_vlseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv4f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv4f16: +define @test_vlseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f16( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + 
%0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv4f16(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f16(,,,, ptr, , i64, i64) -define @test_vlseg4_nxv4f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv4f16: +define @test_vlseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv4f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv4f16: +define @test_vlseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg4e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f16( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv4f16(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16(,,,,, ptr, , i64, i64) -define @test_vlseg5_nxv4f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg5_nxv4f16: +define @test_vlseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0) +; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv4f16(ptr %base, i64 
%vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv4f16: +define @test_vlseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg5e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv4f16(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16(,,,,,, ptr, , i64, i64) -define @test_vlseg6_nxv4f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg6_nxv4f16: +define @test_vlseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16.v v6, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv4f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv4f16: +define @test_vlseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16(,,,,,,, ptr, , i64, i64) -define @test_vlseg7_nxv4f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv4f16: +define @test_vlseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv4f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv4f16: +define @test_vlseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8_nxv4f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv4f16: +define @test_vlseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv4f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv4f16: +define @test_vlseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv2f16(,, ptr , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv2f16(,, ptr, , i64, i64) -define @test_vlseg2_nxv2f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv2f16: +define @test_vlseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg2e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv2f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv2f16: +define @test_vlseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f16( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 
1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv2f16(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2f16(,,, ptr, , i64, i64) -define @test_vlseg3_nxv2f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv2f16: +define @test_vlseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv2f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv2f16: +define @test_vlseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f16( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv2f16(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f16(,,,, ptr, , i64, i64) -define @test_vlseg4_nxv2f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv2f16: +define @test_vlseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0) +; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv2f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv2f16: +define @test_vlseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg4e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f16( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2f16(,,,,, ptr , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16(,,,,, ptr, , i64, i64) -define @test_vlseg5_nxv2f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg5_nxv2f16: +define @test_vlseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlseg5_mask_nxv2f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg5_mask_nxv2f16: +define @test_vlseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg5e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16( %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2f16(,,,,,, ptr , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16(,,,,,, ptr, , i64, i64) -define @test_vlseg6_nxv2f16(ptr %base, i64 %vl) { -; CHECK-LABEL: 
test_vlseg6_nxv2f16: +define @test_vlseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg6_mask_nxv2f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg6_mask_nxv2f16: +define @test_vlseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(,,,,,,, ptr , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16(,,,,,,, ptr, , i64, i64) -define @test_vlseg7_nxv2f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg7_nxv2f16: +define @test_vlseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg7_mask_nxv2f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg7_mask_nxv2f16: +define @test_vlseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg7e16.v v7, (a0) -; 
CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(,,,,,,,, ptr , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8_nxv2f16(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg8_nxv2f16: +define @test_vlseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlseg8_mask_nxv2f16(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg8_mask_nxv2f16: +define @test_vlseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv4f32(,, ptr , i64) -declare {,} 
@llvm.riscv.vlseg2.mask.nxv4f32(,, ptr, , i64, i64) -define @test_vlseg2_nxv4f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg2_nxv4f32: +define @test_vlseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg2e32.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg2_mask_nxv4f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg2_mask_nxv4f32: +define @test_vlseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg2e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f32( %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlseg3.nxv4f32(,,, ptr , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4f32(,,, ptr, , i64, i64) -define @test_vlseg3_nxv4f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg3_nxv4f32: +define @test_vlseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg3e32.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg3_mask_nxv4f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg3_mask_nxv4f32: +define @test_vlseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg3e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; 
CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f32( %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlseg4.nxv4f32(,,,, ptr , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f32(,,,, ptr, , i64, i64) -define @test_vlseg4_nxv4f32(ptr %base, i64 %vl) { -; CHECK-LABEL: test_vlseg4_nxv4f32: +define @test_vlseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg4e32.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlseg4_mask_nxv4f32(ptr %base, i64 %vl, %mask) { -; CHECK-LABEL: test_vlseg4_mask_nxv4f32: +define @test_vlseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg4e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f32( %1, %1, %1, %1, ptr %base, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } + diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll index 9588c85fbdbdd..1516d656663b6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll @@ -2,8 +2,8 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64x \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, ptr, , i32, i32) +declare 
{target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) define void @test_vlseg2ff_dead_value(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_dead_value: @@ -14,24 +14,23 @@ define void @test_vlseg2ff_dead_value(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 2 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 store i32 %1, ptr %outvl ret void } -define void @test_vlseg2ff_mask_dead_value( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +define void @test_vlseg2ff_mask_dead_value(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_dead_value: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 2 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 store i32 %1, ptr %outvl ret void } @@ -43,22 +42,25 @@ define @test_vlseg2ff_dead_vl(ptr %base, i32 %vl) { ; CHECK-NEXT: vlseg2e16ff.v v4, (a0) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + ret %2 } -define @test_vlseg2ff_mask_dead_vl( %val, ptr %base, i32 %vl, %mask) { +define @test_vlseg2ff_mask_dead_vl(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg2ff_mask_dead_vl: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + ret %2 } define 
void @test_vlseg2ff_dead_all(ptr %base, i32 %vl) { @@ -68,18 +70,17 @@ define void @test_vlseg2ff_dead_all(ptr %base, i32 %vl) { ; CHECK-NEXT: vlseg2e16ff.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, ptr %base, i32 %vl) + tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vlseg2ff_mask_dead_all( %val, ptr %base, i32 %vl, %mask) { +define void @test_vlseg2ff_mask_dead_all(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg2ff_mask_dead_all: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, ptr %base, %mask, i32 %vl, i32 1) + tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) ret void } diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll index 0a397386fd693..0e4915895ef34 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll @@ -1,3366 +1,4942 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh \ +; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg2ff_nxv16i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv16i16: +define @test_vlseg2ff_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv16i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16: +define 
@test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i8(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg2ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv1i8: +define @test_vlseg2ff_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv1i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8: +define @test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; 
CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i8(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8(,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg3ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv1i8: +define @test_vlseg2ff_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i8( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv1i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8: +define @test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = 
extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i8(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8(,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg4ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv1i8: +define @test_vlseg2ff_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i8( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv1i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8: +define @test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i8(,,,,, ptr , i32) -declare {,,,,, i32} 
@llvm.riscv.vlseg5ff.mask.nxv1i8(,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg5ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv1i8: +define @test_vlseg2ff_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vlseg2e8ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i8( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv1i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8: +define @test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vlseg2e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i8(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8(,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg6ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv1i8: +define @test_vlseg2ff_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vlseg2e8ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv1i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8: +define @test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vlseg2e8ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i8(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8(,,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg7ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv1i8: +define 
@test_vlseg3ff_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv1i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8: +define @test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i8(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8(,,,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg8ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv1i8: +define @test_vlseg3ff_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv1i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8: +define @test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i8(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg2ff_nxv16i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv16i8: +define @test_vlseg3ff_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vlseg2e8ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, 
i32} @llvm.riscv.vlseg2ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 1
-  %2 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 2
-  store i32 %2, ptr %outvl
-  ret <vscale x 16 x i8> %1
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) undef, ptr %base, i32 %vl, i32 3)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i32} %0, 0
+  %2 = call <vscale x 4 x i8> @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i32} %0, 1
+  store i32 %3, ptr %outvl
+  ret <vscale x 4 x i8> %2
 }

-define <vscale x 16 x i8> @test_vlseg2ff_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
-; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8:
+define <vscale x 4 x i8> @test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
+; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vlseg2e8ff.v v6, (a0), v0.t
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    vmv1r.v v9, v10
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vlseg3e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val, <vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 1
-  %2 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 2
-  store i32 %2, ptr %outvl
-  ret <vscale x 16 x i8> %1
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1, i32 3)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i32} %0, 0
+  %2 = call <vscale x 4 x i8> @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i32} %0, 1
+  store i32 %3, ptr %outvl
+  ret <vscale x 4 x i8> %2
 }

-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg3ff.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr , i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i1>, i32, i32)
+declare {target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, i32, i32)
+declare {target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i1>, i32, i32, i32)

-define <vscale x 16 x i8> @test_vlseg3ff_nxv16i8(ptr %base, i32 %vl, ptr %outvl) {
-; CHECK-LABEL: test_vlseg3ff_nxv16i8:
+define <vscale x 8 x i8> @test_vlseg3ff_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) {
+; CHECK-LABEL: test_vlseg3ff_nxv8i8_triscv.vector.tuple_nxv8i8_3t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vlseg3e8ff.v v6, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vlseg3e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg3ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 1
-  %2 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 3
-  store i32 %2, ptr %outvl
-  ret <vscale x 16 x i8> %1
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) undef, ptr %base, i32 %vl, i32 3)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i32} %0, 0
+  %2 = call <vscale x 8 x i8> @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i32} %0, 1
+  store i32 %3, ptr %outvl
+  ret <vscale x 8 x i8> %2
 }

-define <vscale x 16 x i8> @test_vlseg3ff_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
-; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8:
+define <vscale x 8 x i8> @test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
+; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vlseg3e8ff.v v6, (a0), v0.t
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    vmv1r.v v9, v10
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vlseg3e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8(<vscale x 16 x i8> %val, <vscale x 16 x i8> %val, <vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 1
-  %2 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 3
-  store i32 %2, ptr %outvl
-  ret <vscale x 16 x i8> %1
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1, i32 3)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i32} %0, 0
+  %2 = call <vscale x 8 x i8> @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i32} %0, 1
+  store i32 %3, ptr %outvl
+  ret <vscale x 8 x i8> %2
 }

-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg4ff.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr , i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i1>, i32, i32)
+declare {target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, i32, i32)
+declare {target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 16 x i1>, i32, i32, i32)

-define <vscale x 16 x i8> @test_vlseg4ff_nxv16i8(ptr %base, i32 %vl, ptr %outvl) {
-; CHECK-LABEL: test_vlseg4ff_nxv16i8:
+define <vscale x 16 x i8> @test_vlseg3ff_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) {
+; CHECK-LABEL: test_vlseg3ff_nxv16i8_triscv.vector.tuple_nxv16i8_3t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vlseg4e8ff.v v6, (a0)
+; CHECK-NEXT:    vlseg3e8ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg4ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 1
-  %2 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 4
-  store i32 %2, ptr %outvl
-  ret <vscale x 16 x i8> %1
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) undef, ptr %base, i32 %vl, i32 3)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i32} %0, 0
+  %2 = call <vscale x 16 x i8> @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i32} %0, 1
+  store i32 %3, ptr %outvl
+  ret <vscale x 16 x i8> %2
 }

-define <vscale x 16 x i8> @test_vlseg4ff_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
-; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8:
+define
@test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t +; CHECK-NEXT: vlseg3e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i32(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg2ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv2i32: +define @test_vlseg4ff_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv2i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32: +define @test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, 
v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i32(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32(,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg3ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv2i32: +define @test_vlseg4ff_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv2i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32: +define @test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, 
i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i32(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32(,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg4ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv2i32: +define @test_vlseg4ff_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv2i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32: +define @test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + 
%2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i32(,,,,, ptr , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32(,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg5ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv2i32: +define @test_vlseg4ff_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i32( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv2i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32: +define @test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} 
@llvm.riscv.vlseg6ff.nxv2i32(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i32(,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg6ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv2i32: +define @test_vlseg4ff_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vlseg4e8ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv2i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32: +define @test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i32(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32(,,,,,,, ptr, , i32, i32) +declare 
{target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) -define @test_vlseg7ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv2i32: +define @test_vlseg5ff_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv2i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32: +define @test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i32(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32(,,,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", 
, 5), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) -define @test_vlseg8ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv2i32: +define @test_vlseg5ff_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv2i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32: +define @test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i16(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 5), i32} 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) -define @test_vlseg2ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv4i16: +define @test_vlseg5ff_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv4i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16: +define @test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4i16(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16(,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) -define @test_vlseg3ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv4i16: +define @test_vlseg5ff_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4i16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv4i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16: +define @test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i16(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i16(,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) -define @test_vlseg4ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv4i16: +define @test_vlseg6ff_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i16( 
undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv4i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16: +define @test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i16(,,,,, ptr , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16(,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) -define @test_vlseg5ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv4i16: +define @test_vlseg6ff_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} 
@llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv4i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16: +define @test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i16(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16(,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) -define @test_vlseg6ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv4i16: +define @test_vlseg6ff_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 
+ %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv4i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16: +define @test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i16(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16(,,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) -define @test_vlseg7ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv4i16: +define @test_vlseg6ff_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = 
extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv4i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16: +define @test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i16(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16(,,,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) -define @test_vlseg8ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv4i16: +define @test_vlseg7ff_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, 
ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv4i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16: +define @test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i32(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) -define @test_vlseg2ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv1i32: +define @test_vlseg7ff_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv1i32( %val, ptr %base, i32 %vl, 
%mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32: +define @test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i32(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32(,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) -define @test_vlseg3ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv1i32: +define @test_vlseg7ff_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv1i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32: +define @test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i32(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32(,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) -define @test_vlseg4ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv1i32: +define @test_vlseg7ff_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv1i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32: +define @test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: 
vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i32(,,,,, ptr , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32(,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) -define @test_vlseg5ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv1i32: +define @test_vlseg8ff_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i32( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv1i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32: +define @test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, 
a1, e8, mf8, ta, mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i32(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32(,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) -define @test_vlseg6ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv1i32: +define @test_vlseg8ff_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv1i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32: +define @test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl 
; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i32(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i32(,,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) -define @test_vlseg7ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv1i32: +define @test_vlseg8ff_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv1i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32: +define @test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i32(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32(,,,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) +declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) -define @test_vlseg8ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv1i32: +define @test_vlseg8ff_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv1i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32: +define @test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; 
CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i16(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg2ff_nxv8i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv8i16: +define @test_vlseg2ff_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv8i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16: +define @test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call 
@llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8i16(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16(,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg3ff_nxv8i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv8i16: +define @test_vlseg2ff_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8i16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv8i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16: +define @test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i16(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16(,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define 
@test_vlseg4ff_nxv8i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv8i16: +define @test_vlseg2ff_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg4e16ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv8i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16: +define @test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i8(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg2ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv8i8: +define @test_vlseg2ff_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw 
a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv8i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8: +define @test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8i8(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8(,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg3ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv8i8: +define @test_vlseg2ff_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8i8( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 
2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv8i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8: +define @test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i8(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8(,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg4ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv8i8: +define @test_vlseg3ff_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i8( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv8i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8: +define @test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; 
CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv8i8(,,,,, ptr , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8(,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg5ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv8i8: +define @test_vlseg3ff_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv8i8( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv8i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8: +define @test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8( %val, %val, 
%val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv8i8(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8(,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg6ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv8i8: +define @test_vlseg3ff_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv8i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv8i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8: +define @test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue 
{target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv8i8(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv8i8(,,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg7ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv8i8: +define @test_vlseg3ff_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv8i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv8i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8: +define @test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} 
@llvm.riscv.vlseg8ff.nxv8i8(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8(,,,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg8ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv8i8: +define @test_vlseg4ff_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv8i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8: +define @test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i32(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 
4), ptr, , i32, i32, i32) -define @test_vlseg2ff_nxv8i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv8i32: +define @test_vlseg4ff_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv8i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32: +define @test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i8(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg2ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv4i8: +define @test_vlseg4ff_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw 
a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv4i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8: +define @test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4i8(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8(,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) -define @test_vlseg3ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv4i8: +define @test_vlseg4ff_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4i8( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call 
@llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv4i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8: +define @test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i8(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8(,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) -define @test_vlseg4ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv4i8: +define @test_vlseg5ff_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i8( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv4i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8: +define @test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, 
ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i8(,,,,, ptr , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8(,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) -define @test_vlseg5ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv4i8: +define @test_vlseg5ff_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i8( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv4i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8: +define @test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; 
CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i8(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8(,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) -define @test_vlseg6ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv4i8: +define @test_vlseg5ff_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv4i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8: +define @test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 
= extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i8(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8(,,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) -define @test_vlseg7ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv4i8: +define @test_vlseg6ff_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv4i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8: +define @test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i8(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8(,,,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) -define @test_vlseg8ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv4i8: +define @test_vlseg6ff_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv4i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8: +define @test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue 
{target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i16(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) -define @test_vlseg2ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv1i16: +define @test_vlseg6ff_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv1i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16: +define @test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i16(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i16(,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 7), i32} 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) -define @test_vlseg3ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv1i16: +define @test_vlseg7ff_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv1i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16: +define @test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i16( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i16(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16(,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) -define @test_vlseg4ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv1i16: +define @test_vlseg7ff_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv1i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16: +define @test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i16(,,,,, ptr , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16(,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) -define @test_vlseg5ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv1i16: +define @test_vlseg7ff_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - 
store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv1i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16: +define @test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i16(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16(,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) -define @test_vlseg6ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv1i16: +define @test_vlseg8ff_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call 
@llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv1i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16: +define @test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i16(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16(,,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) -define @test_vlseg7ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv1i16: +define @test_vlseg8ff_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv1i16( %val, 
ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16: +define @test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i16(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16(,,,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) -define @test_vlseg8ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv1i16: +define @test_vlseg8ff_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv1i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16: +define @test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, 
ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv32i8(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg2ff_nxv32i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv32i8: +define @test_vlseg2ff_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vlseg2e8ff.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv32i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv32i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8: +define @test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: 
vlseg2e8ff.v v4, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i8(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg2ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv2i8: +define @test_vlseg2ff_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i8( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv2i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8: +define @test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr 
%base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i8(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8(,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg3ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv2i8: +define @test_vlseg2ff_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i8( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv2i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8: +define @test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i8(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8(,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 2), i32} 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) -define @test_vlseg4ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv2i8: +define @test_vlseg2ff_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i8( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv2i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8: +define @test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i8(,,,,, ptr , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8(,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg5ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv2i8: +define @test_vlseg3ff_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg5e8ff.v 
v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i8( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv2i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8: +define @test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i8(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8(,,,,,, ptr, , i32, i32) +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) -define @test_vlseg6ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv2i8: +define @test_vlseg3ff_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call 
{target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv2i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8: +define @test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) + +define @test_vlseg3ff_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4i32_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg3e32ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = 
extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) + +define @test_vlseg4ff_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1i32_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) + +define @test_vlseg4ff_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2i32_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: 
test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) + +define @test_vlseg4ff_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4i32_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg4e32ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) + +define @test_vlseg5ff_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i32_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 5), i32} 
@llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) + +define @test_vlseg5ff_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2i32_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr 
%outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) + +define @test_vlseg6ff_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i32_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) + +define @test_vlseg6ff_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2i32_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; 
CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) + +define @test_vlseg7ff_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1i32_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) + +define @test_vlseg7ff_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2i32_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, 
i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) + +define @test_vlseg8ff_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1i32_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue 
{target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) + +define @test_vlseg8ff_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2i32_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) + +define @test_vlseg2ff_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i64_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg2e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; 
CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sw a0, 0(a2)
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1, i32 6)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i32} %0, 0
+  %2 = call <vscale x 1 x i64> @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i32} %0, 1
+  store i32 %3, ptr %outvl
+  ret <vscale x 1 x i64> %2
+}
+
+declare {target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i1>, i32, i32, i32)
+
+define <vscale x 2 x i64> @test_vlseg2ff_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) {
+; CHECK-LABEL: test_vlseg2ff_nxv2i64_triscv.vector.tuple_nxv16i8_2t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vlseg2e64ff.v v6, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sw a0, 0(a2)
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) undef, ptr %base, i32 %vl, i32 6)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i32} %0, 0
+  %2 = call <vscale x 2 x i64> @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i32} %0, 1
+  store i32 %3, ptr %outvl
+  ret <vscale x 2 x i64> %2
+}
+
+define <vscale x 2 x i64> @test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
+; CHECK-LABEL: test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sw a0, 0(a2)
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1, i32 6)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i32} %0, 0
+  %2 = call <vscale x 2 x i64> @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i32} %0, 1
+  store i32 %3, ptr %outvl
+  ret <vscale x 2 x i64> %2
+}
+
+declare {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i1>, i32, i32, i32)
+
+define <vscale x 4 x i64> @test_vlseg2ff_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) {
+; CHECK-LABEL: test_vlseg2ff_nxv4i64_triscv.vector.tuple_nxv32i8_2t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vlseg2e64ff.v v4, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sw a0, 0(a2)
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, i32 %vl, i32 6)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} %0, 0
+  %2 = call <vscale x 4 x i64> 
@llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) + +define @test_vlseg3ff_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i64_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg3e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) + +define @test_vlseg3ff_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: 
test_vlseg3ff_nxv2i64_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg3e64ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) + +define @test_vlseg4ff_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1i64_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg4e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 
4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) + +define @test_vlseg4ff_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2i64_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg4e64ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) + +define @test_vlseg5ff_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i64_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg5e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) + +define @test_vlseg6ff_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i64_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg6e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) + +define @test_vlseg7ff_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1i64_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg7e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 7), i32} 
@llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) + +define @test_vlseg8ff_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1i64_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg8e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i8(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8(,,,,,,, ptr, , i32, i32) -define @test_vlseg7ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv2i8: +define @test_vlseg2ff_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv2i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8: +define @test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i8(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} 
@llvm.riscv.vlseg8ff.mask.nxv2i8(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv2i8: +define @test_vlseg2ff_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv2i8( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8: +define @test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg2ff_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4f16_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + 
+define @test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg2ff_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8f16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg2ff_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv16f16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = 
extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg3ff_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1f16_triscv.vector.tuple_nxv2i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg3ff_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2f16_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + 
%2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg3ff_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4f16_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg3ff_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv8f16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg4ff_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1f16_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg4ff_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2f16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, 
mf2, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg4ff_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4f16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + + +define 
@test_vlseg4ff_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv8f16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg5ff_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1f16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - 
ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i16(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, ptr, , i32, i32) -define @test_vlseg2ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv2i16: +define @test_vlseg5ff_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv2i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16: +define @test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i16(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16(,,, ptr, , i32, i32) -define @test_vlseg3ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv2i16: +define 
@test_vlseg5ff_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv2i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16: +define @test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i16(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16(,,,, ptr, , i32, i32) -define @test_vlseg4ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv2i16: +define @test_vlseg6ff_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - 
ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv2i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16: +define @test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i16(,,,,, ptr , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16(,,,,, ptr, , i32, i32) -define @test_vlseg5ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv2i16: +define @test_vlseg6ff_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv2i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: 
test_vlseg5ff_mask_nxv2i16: +define @test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i16(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16(,,,,,, ptr, , i32, i32) -define @test_vlseg6ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv2i16: +define @test_vlseg6ff_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv2i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16: +define @test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: 
vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i16(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16(,,,,,,, ptr, , i32, i32) -define @test_vlseg7ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv2i16: +define @test_vlseg7ff_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv2i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16: +define @test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr 
%outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i16(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv2i16: +define @test_vlseg7ff_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv2i16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16: +define @test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = 
extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i32(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(,, ptr, , i32, i32) -define @test_vlseg2ff_nxv4i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv4i32: +define @test_vlseg7ff_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv4i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32: +define @test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4i32(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32(,,, ptr, , i32, i32) -define @test_vlseg3ff_nxv4i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv4i32: +define @test_vlseg8ff_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg3e32ff.v v6, 
(a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4i32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv4i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32: +define @test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i32(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32(,,,, ptr, , i32, i32) -define @test_vlseg4ff_nxv4i32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv4i32: +define @test_vlseg8ff_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg4e32ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} 
@llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv4i32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32: +define @test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv16f16(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16(,, ptr, , i32, i32) -define @test_vlseg2ff_nxv16f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv16f16: +define @test_vlseg8ff_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv16f16( %val, ptr %base, i32 %vl, %mask, ptr 
%outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16: +define @test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f64(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64(,, ptr, , i32, i32) -define @test_vlseg2ff_nxv4f64(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv4f64: +define @test_vlseg2ff_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vlseg2e64ff.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4f64( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv4f64( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64: +define @test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), 
v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1f64(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64(,, ptr, , i32, i32) -define @test_vlseg2ff_nxv1f64(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv1f64: +define @test_vlseg2ff_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg2e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f64( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv1f64( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64: +define @test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare 
{,,, i32} @llvm.riscv.vlseg3ff.nxv1f64(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64(,,, ptr, , i32, i32) -define @test_vlseg3ff_nxv1f64(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv1f64: +define @test_vlseg2ff_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg3e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f64( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv1f64( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64: +define @test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f64(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64(,,,, ptr, , i32, i32) -define @test_vlseg4ff_nxv1f64(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv1f64: +define @test_vlseg2ff_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg4e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} 
@llvm.riscv.vlseg4ff.nxv1f64( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv1f64( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64: +define @test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f64(,,,,, ptr , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64(,,,,, ptr, , i32, i32) -define @test_vlseg5ff_nxv1f64(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv1f64: +define @test_vlseg3ff_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg5e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f64( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr 
%outvl + ret %2 } -define @test_vlseg5ff_mask_nxv1f64( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64: +define @test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f64(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64(,,,,,, ptr, , i32, i32) -define @test_vlseg6ff_nxv1f64(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv1f64: +define @test_vlseg3ff_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg6e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f64( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv1f64( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64: +define @test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu 
-; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f64(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64(,,,,,,, ptr, , i32, i32) -define @test_vlseg7ff_nxv1f64(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv1f64: +define @test_vlseg3ff_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg7e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg3e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f64( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv1f64( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64: +define @test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = 
extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f64(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8ff_nxv1f64(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv1f64: +define @test_vlseg4ff_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg8e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv1f64( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64: +define @test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call 
@llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2f32(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32(,, ptr, , i32, i32) -define @test_vlseg2ff_nxv2f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv2f32: +define @test_vlseg4ff_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv2f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32: +define @test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f32(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32(,,, ptr, , i32, i32) -define @test_vlseg3ff_nxv2f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv2f32: +define @test_vlseg4ff_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg4e32ff.v v6, (a0) ; CHECK-NEXT: 
csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2f32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv2f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32: +define @test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f32(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32(,,,, ptr, , i32, i32) -define @test_vlseg4ff_nxv2f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv2f32: +define @test_vlseg5ff_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg5e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 
1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv2f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32: +define @test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f32(,,,,, ptr , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32(,,,,, ptr, , i32, i32) -define @test_vlseg5ff_nxv2f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv2f32: +define @test_vlseg5ff_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg5e32ff.v v7, (a0) @@ -3368,38 +4944,80 @@ define @test_vlseg5ff_nxv2f32(ptr %base, i32 %vl, ptr %outv ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f32( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv2f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32: +define @test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 +; 
CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg6ff_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1f32_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 +} + +define @test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f32(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32(,,,,,, ptr, , i32, i32) -define @test_vlseg6ff_nxv2f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv2f32: +define @test_vlseg6ff_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg6e32ff.v v7, (a0) @@ -3407,1389 +5025,1419 @@ define 
@test_vlseg6ff_nxv2f32(ptr %base, i32 %vl, ptr %outv ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv2f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32: +define @test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f32(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f32(,,,,,,, ptr, , i32, i32) -define @test_vlseg7ff_nxv2f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv2f32: +define @test_vlseg7ff_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg7e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} 
%0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv2f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32: +define @test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f32(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8ff_nxv2f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv2f32: +define @test_vlseg7ff_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: vlseg7e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv2f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32: +define 
@test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1f16(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16(,, ptr, , i32, i32) -define @test_vlseg2ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv1f16: +define @test_vlseg8ff_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv1f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16: +define @test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, 
v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1f16(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16(,,, ptr, , i32, i32) -define @test_vlseg3ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv1f16: +define @test_vlseg8ff_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv1f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16: +define @test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call 
{target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f16(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16(,,,, ptr, , i32, i32) -define @test_vlseg4ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv1f16: +define @test_vlseg2ff_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg2e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv1f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16: +define @test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f16(,,,,, ptr , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16(,,,,, ptr, , i32, i32) -define @test_vlseg5ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl) { -; 
CHECK-LABEL: test_vlseg5ff_nxv1f16: +define @test_vlseg2ff_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg2e64ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv1f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16: +define @test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f16(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16(,,,,,, ptr, , i32, i32) -define @test_vlseg6ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv1f16: +define @test_vlseg2ff_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vlseg2e64ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f16( undef, undef, undef, undef, undef, undef, ptr %base, 
i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv1f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16: +define @test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f16(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16(,,,,,,, ptr, , i32, i32) -define @test_vlseg7ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv1f16: +define @test_vlseg3ff_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg3e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue 
{target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv1f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16: +define @test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f16(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +define @test_vlseg3ff_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2f64_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg3e64ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv1f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16: +define @test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1f32(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32(,, ptr, , i32, i32) -define @test_vlseg2ff_nxv1f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv1f32: +define @test_vlseg4ff_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg4e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv1f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32: +define @test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32( %val, %val, ptr %base, %mask, 
i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1f32(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32(,,, ptr, , i32, i32) -define @test_vlseg3ff_nxv1f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv1f32: +define @test_vlseg4ff_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg4e64ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv1f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32: +define @test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} 
@llvm.riscv.vlseg4ff.nxv1f32(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32(,,,, ptr, , i32, i32) -define @test_vlseg4ff_nxv1f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv1f32: +define @test_vlseg5ff_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg5e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv1f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32: +define @test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f32(,,,,, ptr , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32(,,,,, ptr, , i32, i32) -define @test_vlseg5ff_nxv1f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv1f32: +define @test_vlseg6ff_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg6e64ff.v v7, (a0) ; 
CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f32( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv1f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32: +define @test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f32(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32(,,,,,, ptr, , i32, i32) -define @test_vlseg6ff_nxv1f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv1f32: +define @test_vlseg7ff_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg7e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 6) + %1 = 
extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv1f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32: +define @test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f32(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32(,,,,,,, ptr, , i32, i32) -define @test_vlseg7ff_nxv1f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv1f32: +define @test_vlseg8ff_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg8e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv1f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: 
test_vlseg7ff_mask_nxv1f32: +define @test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 6) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f32(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8ff_nxv1f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv1f32: +define @test_vlseg2ff_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv1f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32: +define @test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; 
CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv8f16(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16(,, ptr, , i32, i32) -define @test_vlseg2ff_nxv8f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv8f16: +define @test_vlseg2ff_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv8f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16: +define @test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call 
{target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8f16(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16(,,, ptr, , i32, i32) -define @test_vlseg3ff_nxv8f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv8f16: +define @test_vlseg2ff_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8f16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv8f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16: +define @test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv8f16(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16(,,,, ptr, , i32, i32) -define @test_vlseg4ff_nxv8f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: 
test_vlseg4ff_nxv8f16: +define @test_vlseg2ff_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg4e16ff.v v6, (a0) +; CHECK-NEXT: vlseg2e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv8f16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv8f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16: +define @test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t +; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv8f32(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32(,, ptr, , i32, i32) -define @test_vlseg2ff_nxv8f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv8f32: +define @test_vlseg2ff_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8f32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv8f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32: +define @test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2f64(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64(,, ptr, , i32, i32) -define @test_vlseg2ff_nxv2f64(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv2f64: +define @test_vlseg3ff_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg2e64ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f64( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv2f64( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64: +define @test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f64(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64(,,, ptr, , i32, i32) -define @test_vlseg3ff_nxv2f64(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv2f64: +define @test_vlseg3ff_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg3e64ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2f64( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv2f64( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64: +define @test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f64(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64(,,,, ptr, , i32, i32) -define @test_vlseg4ff_nxv2f64(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv2f64: +define @test_vlseg3ff_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg4e64ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f64( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv2f64( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64: +define @test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f16(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16(,, ptr, , i32, i32) -define @test_vlseg2ff_nxv4f16(ptr %base, i32 %vl, ptr 
%outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv4f16: +define @test_vlseg3ff_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv4f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16: +define @test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4f16(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16(,,, ptr, , i32, i32) -define @test_vlseg3ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv4f16: +define @test_vlseg4ff_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4f16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call 
{target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv4f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16: +define @test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f16(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16(,,,, ptr, , i32, i32) -define @test_vlseg4ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv4f16: +define @test_vlseg4ff_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv4f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16: +define @test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, 
i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4f16(,,,,, ptr , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4f16(,,,,, ptr, , i32, i32) -define @test_vlseg5ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv4f16: +define @test_vlseg4ff_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4f16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv4f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16: +define @test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4f16( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = 
extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4f16(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16(,,,,,, ptr, , i32, i32) -define @test_vlseg6ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv4f16: +define @test_vlseg4ff_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4f16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv4f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16: +define @test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 0 + %2 = call 
@llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4f16(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16(,,,,,,, ptr, , i32, i32) -define @test_vlseg7ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv4f16: +define @test_vlseg5ff_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv4f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16: +define @test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4f16(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8ff_nxv4f16(ptr %base, i32 %vl, ptr 
%outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv4f16: +define @test_vlseg5ff_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv4f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16: +define @test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2f16(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16(,, ptr, , i32, i32) -define @test_vlseg2ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv2f16: +define @test_vlseg5ff_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: 
vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f16( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv2f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16: +define @test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f16(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16(,,, ptr, , i32, i32) -define @test_vlseg3ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv2f16: +define @test_vlseg6ff_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2f16( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue 
{target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv2f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16: +define @test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f16(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16(,,,, ptr, , i32, i32) -define @test_vlseg4ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv2f16: +define @test_vlseg6ff_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f16( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv2f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16: +define @test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; 
CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f16(,,,,, ptr , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16(,,,,, ptr, , i32, i32) -define @test_vlseg5ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv2f16: +define @test_vlseg6ff_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f16( undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv2f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16: +define @test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,, i32} %0, 1 - %2 = extractvalue {,,,,, i32} %0, 5 - store i32 %2, ptr %outvl - ret 
%1 + %0 = tail call {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f16(,,,,,, ptr , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16(,,,,,, ptr, , i32, i32) -define @test_vlseg6ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv2f16: +define @test_vlseg7ff_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv2f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16: +define @test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,, i32} %0, 6 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue 
{target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f16(,,,,,,, ptr , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16(,,,,,,, ptr, , i32, i32) -define @test_vlseg7ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv2f16: +define @test_vlseg7ff_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) @@ -4797,184 +6445,206 @@ define @test_vlseg7ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv2f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16: +define @test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,, i32} %0, 7 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f16(,,,,,,,, ptr , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16(,,,,,,,, ptr, , i32, i32) -define @test_vlseg8ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv2f16: +define @test_vlseg7ff_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: 
test_vlseg7ff_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv2f16( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16: +define @test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,, i32} %0, 1 - %2 = extractvalue {,,,,,,,, i32} %0, 8 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f32(,, ptr , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32(,, ptr, , i32, i32) -define @test_vlseg2ff_nxv4f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv4f32: +define @test_vlseg8ff_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret 
entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4f32( undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv4f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32: +define @test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32( %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,, i32} %0, 1 - %2 = extractvalue {,, i32} %0, 2 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4f32(,,, ptr , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f32(,,, ptr, , i32, i32) -define @test_vlseg3ff_nxv4f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv4f32: +define @test_vlseg8ff_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg3e32ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4f32( undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) 
%1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv4f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32: +define @test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f32( %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,, i32} %0, 1 - %2 = extractvalue {,,, i32} %0, 3 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f32(,,,, ptr , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f32(,,,, ptr, , i32, i32) -define @test_vlseg4ff_nxv4f32(ptr %base, i32 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv4f32: +define @test_vlseg8ff_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg4e32ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f32( undef, undef, undef, undef, ptr %base, i32 %vl) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %vl, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv4f32( %val, ptr %base, i32 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32: +define @test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f32( %val, %val, %val, %val, ptr %base, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,, i32} %0, 1 - %2 = extractvalue {,,,, i32} %0, 4 - store i32 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 1, i32 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i32} %0, 1 + store i32 %3, ptr %outvl + ret %2 } + diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll index 02c2994d96622..3dc0db90b6d85 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll @@ -2,8 +2,8 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64x \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) define void @test_vlseg2ff_dead_value(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_dead_value: @@ -14,24 +14,23 @@ define void @test_vlseg2ff_dead_value(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 2 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 store i64 %1, ptr %outvl ret void } -define void @test_vlseg2ff_mask_dead_value( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +define void @test_vlseg2ff_mask_dead_value(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_dead_value: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 2 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64} %0, 1
   store i64 %1, ptr %outvl
   ret void
 }
 
@@ -43,22 +42,25 @@ define <vscale x 16 x i16> @test_vlseg2ff_dead_vl(ptr %base, i64 %vl) {
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 1
-  ret <vscale x 16 x i16> %1
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, i64 %vl, i64 4)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64} %0, 0
+  %2 = call <vscale x 16 x i16> @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %1, i32 1)
+  ret <vscale x 16 x i16> %2
 }
 
-define <vscale x 16 x i16> @test_vlseg2ff_mask_dead_vl(<vscale x 16 x i16> %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i16> @test_vlseg2ff_mask_dead_vl(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vlseg2ff_mask_dead_vl:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv4r.v v4, v8
+; CHECK-NEXT:    vmv4r.v v8, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 1
-  ret <vscale x 16 x i16> %1
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64} %0, 0
+  %2 = call <vscale x 16 x i16> @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %1, i32 1)
+  ret <vscale x 16 x i16> %2
 }
 
 define void @test_vlseg2ff_dead_all(ptr %base, i64 %vl) {
@@ -68,18 +70,17 @@ define void @test_vlseg2ff_dead_all(ptr %base, i64 %vl) {
 ; CHECK-NEXT:    vlseg2e16ff.v v8, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %vl)
+  tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, i64 %vl, i64 4)
   ret void
 }
 
-define void @test_vlseg2ff_mask_dead_all(<vscale x 16 x i16> %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
+define void @test_vlseg2ff_mask_dead_all(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vlseg2ff_mask_dead_all:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vlseg2e16ff.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
+  tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1, i64 4)
   ret void
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
index 1c32fd322d617..632fbc1e4431d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
@@ -1,159 +1,168 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh \
+; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \
 ; RUN:
-verify-machineinstrs < %s | FileCheck %s -declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv16i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv16i16: +define @test_vlseg2ff_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv16i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16: +define @test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i32(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv4i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv4i32: +define @test_vlseg2ff_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv4i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32: +define @test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i32(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32(,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg3ff_nxv4i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv4i32: +define @test_vlseg2ff_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, 
m2, ta, ma -; CHECK-NEXT: vlseg3e32ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv4i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32: +define @test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i32(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i32(,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg4ff_nxv4i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv4i32: +define @test_vlseg2ff_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg4e32ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store 
i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv4i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32: +define @test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i32( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i8(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv16i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv16i8: +define @test_vlseg2ff_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg2e8ff.v v6, (a0) @@ -161,998 +170,1088 @@ define @test_vlseg2ff_nxv16i8(ptr %base, i64 %vl, ptr %outvl) ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 
= extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv16i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8: +define @test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv16i8(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv16i8(,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg3ff_nxv16i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv16i8: +define @test_vlseg2ff_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vlseg3e8ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vlseg2e8ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv16i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv16i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8: +define @test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vlseg3e8ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vlseg2e8ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv16i8( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv16i8(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv16i8(,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg4ff_nxv16i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv16i8: +define @test_vlseg3ff_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vlseg4e8ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv16i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv16i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8: +define @test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv16i8( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i64(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv1i64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv1i64: +define @test_vlseg3ff_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg2e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv1i64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv1i64: +define @test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue 
{target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i64(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i64(,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg3ff_nxv1i64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv1i64: +define @test_vlseg3ff_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg3e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i64( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv1i64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64: +define @test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i64( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i64(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i64(,,,, ptr, , 
i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg4ff_nxv1i64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv1i64: +define @test_vlseg3ff_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg4e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i64( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv1i64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64: +define @test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i64( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i64(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i64(,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define 
@test_vlseg5ff_nxv1i64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv1i64: +define @test_vlseg3ff_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg5e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vlseg3e8ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i64( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv1i64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64: +define @test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vlseg3e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i64(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i64(,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg6ff_nxv1i64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv1i64: +define @test_vlseg4ff_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: 
test_vlseg4ff_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg6e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv1i64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64: +define @test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i64(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i64(,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg7ff_nxv1i64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv1i64: +define @test_vlseg4ff_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg7e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, 
mf4, ta, ma +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv1i64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64: +define @test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i64(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i64(,,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg8ff_nxv1i64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv1i64: +define @test_vlseg4ff_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg8e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv1i64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64: +define @test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i32(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv1i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv1i32: +define @test_vlseg4ff_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = 
extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv1i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32: +define @test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i32(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i32(,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg3ff_nxv1i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv1i32: +define @test_vlseg4ff_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vlseg4e8ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call 
@llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv1i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32: +define @test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i32( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i32(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i32(,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) -define @test_vlseg4ff_nxv1i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv1i32: +define @test_vlseg5ff_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv1i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: 
test_vlseg4ff_mask_nxv1i32: +define @test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i32( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i32(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i32(,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) -define @test_vlseg5ff_nxv1i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv1i32: +define @test_vlseg5ff_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i32( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv1i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32: +define @test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i32(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i32(,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) -define @test_vlseg6ff_nxv1i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv1i32: +define @test_vlseg5ff_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv1i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32: +define @test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; 
CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i32(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i32(,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) -define @test_vlseg7ff_nxv1i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv1i32: +define @test_vlseg5ff_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv1i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32: +define @test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, 
v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i32(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i32(,,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg8ff_nxv1i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv1i32: +define @test_vlseg6ff_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv1i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32: +define @test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i16(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv8i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv8i16: +define @test_vlseg6ff_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv8i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16: +define @test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - 
%1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8i16(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i16(,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg3ff_nxv8i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv8i16: +define @test_vlseg6ff_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8i16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv8i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16: +define @test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i16( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, 
%mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i16(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i16(,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg4ff_nxv8i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv8i16: +define @test_vlseg6ff_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg4e16ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv8i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16: +define @test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = 
extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i8(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv4i8: +define @test_vlseg7ff_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv4i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8: +define @test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i8(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i8(,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 7), i64} 
@llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg3ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv4i8: +define @test_vlseg7ff_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv4i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8: +define @test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i8( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i8(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i8(,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define 
@test_vlseg4ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv4i8: +define @test_vlseg7ff_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv4i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8: +define @test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i8( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i8(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i8(,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg5ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv4i8: +define @test_vlseg7ff_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, 
ma -; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i8( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv4i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8: +define @test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i8(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i8(,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg6ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv4i8: +define @test_vlseg8ff_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 
0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv4i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8: +define @test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i8(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i8(,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg7ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv4i8: +define @test_vlseg8ff_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} 
@llvm.riscv.vlseg7ff.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv4i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8: +define @test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i8(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i8(,,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg8ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv4i8: +define @test_vlseg8ff_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg8e8ff.v v7, (a0) @@ -1160,2627 +1259,3684 @@ define @test_vlseg8ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i8( undef, undef , undef , 
undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv4i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8: +define @test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i16(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) +declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv1i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv1i16: +define @test_vlseg8ff_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, 
ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv1i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16: +define @test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 3) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i16(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i16(,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg3ff_nxv1i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv1i16: +define @test_vlseg2ff_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + 
ret %2 } -define @test_vlseg3ff_mask_nxv1i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16: +define @test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i16( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i16(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i16(,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg4ff_nxv1i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv1i16: +define @test_vlseg2ff_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv1i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16: +define @test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i16(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i16(,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg5ff_nxv1i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv1i16: +define @test_vlseg2ff_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv1i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16: +define @test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i16(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i16(,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg6ff_nxv1i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv1i16: +define @test_vlseg2ff_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv1i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16: +define @test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue 
{target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i16(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i16(,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg7ff_nxv1i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv1i16: +define @test_vlseg2ff_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv1i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16: +define @test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i16(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i16(,,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg8ff_nxv1i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv1i16: +define @test_vlseg3ff_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv1i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16: +define @test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i32(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv2i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv2i32: +define @test_vlseg3ff_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv2i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32: +define @test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i32(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i32(,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg3ff_nxv2i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv2i32: +define @test_vlseg3ff_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv2i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32: +define @test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i32( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i32(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i32(,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg4ff_nxv2i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv2i32: +define @test_vlseg3ff_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv2i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: 
test_vlseg4ff_mask_nxv2i32: +define @test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i32( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i32(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i32(,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg5ff_nxv2i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv2i32: +define @test_vlseg4ff_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg5e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i32( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv2i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32: +define @test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, 
ta, mu -; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i32(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i32(,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg6ff_nxv2i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv2i32: +define @test_vlseg4ff_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv2i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32: +define @test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} 
@llvm.riscv.vlseg6ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i32(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i32(,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg7ff_nxv2i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv2i32: +define @test_vlseg4ff_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv2i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32: +define @test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i32(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i32(,,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg8ff_nxv2i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv2i32: +define @test_vlseg4ff_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv2i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32: +define @test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), 
i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv8i8: +define @test_vlseg5ff_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv8i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8: +define @test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8i8(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i8(,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) 
-define @test_vlseg3ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv8i8: +define @test_vlseg5ff_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv8i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8: +define @test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i8( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i8(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i8(,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) -define @test_vlseg4ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv8i8: +define @test_vlseg5ff_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, 
vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv8i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8: +define @test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i8( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv8i8(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv8i8(,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg5ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv8i8: +define @test_vlseg6ff_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv8i8( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr 
%base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv8i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8: +define @test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv8i8(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv8i8(,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg6ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv8i8: +define @test_vlseg6ff_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv8i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define 
@test_vlseg6ff_mask_nxv8i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8: +define @test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv8i8(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv8i8(,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg7ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv8i8: +define @test_vlseg6ff_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv8i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv8i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8: +define @test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: 
test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv8i8(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv8i8(,,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg8ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv8i8: +define @test_vlseg7ff_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv8i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8: +define @test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v 
v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i64(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv4i64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv4i64: +define @test_vlseg7ff_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vlseg2e64ff.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv4i64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv4i64: +define @test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, 
e16, mf2, ta, mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i16(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv4i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv4i16: +define @test_vlseg7ff_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv4i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16: +define @test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = 
extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i16(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i16(,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg3ff_nxv4i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv4i16: +define @test_vlseg8ff_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv4i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16: +define @test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i16( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i16(,,,, ptr , i64) -declare {,,,, i64} 
@llvm.riscv.vlseg4ff.mask.nxv4i16(,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg4ff_nxv4i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv4i16: +define @test_vlseg8ff_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv4i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16: +define @test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i16(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i16(,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg5ff_nxv4i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: 
test_vlseg5ff_nxv4i16: +define @test_vlseg8ff_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv4i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16: +define @test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i16(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i16(,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg6ff_nxv4i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv4i16: +define @test_vlseg2ff_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: 
vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv4i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16: +define @test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i16(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i16(,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg7ff_nxv4i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv4i16: +define @test_vlseg2ff_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv4i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16: +define @test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i16(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i16(,,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg8ff_nxv4i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv4i16: +define @test_vlseg2ff_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue 
{target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv4i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16: +define @test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i8(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv1i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv1i8: +define @test_vlseg2ff_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv1i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8: +define @test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) 
{ +; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i8(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i8(,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg3ff_nxv1i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv1i8: +define @test_vlseg3ff_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv1i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8: +define @test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i8( %val, %val, %val, ptr %base, %mask, i64 
%vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i8(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i8(,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg4ff_nxv1i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv1i8: +define @test_vlseg3ff_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv1i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8: +define @test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i8( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, 
i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i8(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i8(,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) -define @test_vlseg5ff_nxv1i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv1i8: +define @test_vlseg3ff_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg3e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i8( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv1i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8: +define @test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t -; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i8(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i8(,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg6ff_nxv1i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv1i8: +define @test_vlseg4ff_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv1i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8: +define @test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i8(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i8(,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg7ff_nxv1i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv1i8: +define @test_vlseg4ff_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: 
test_vlseg4ff_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv1i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8: +define @test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i8(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i8(,,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) -define @test_vlseg8ff_nxv1i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv1i8: +define @test_vlseg4ff_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg4e32ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 
0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) + +define @test_vlseg5ff_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i32_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg5e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv1i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8: +define @test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, 
v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i8(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv2i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv2i8: +define @test_vlseg5ff_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg2e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg5e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv2i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8: +define @test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call 
{target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i8(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i8(,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg3ff_nxv2i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv2i8: +define @test_vlseg6ff_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg3e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg6e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i8( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv2i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8: +define @test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i8( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr 
%outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i8(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i8(,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) -define @test_vlseg4ff_nxv2i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv2i8: +define @test_vlseg6ff_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg4e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg6e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i8( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv2i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8: +define @test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i8( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i8(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i8(,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg5ff_nxv2i8(ptr %base, i64 %vl, ptr %outvl) { -; 
CHECK-LABEL: test_vlseg5ff_nxv2i8: +define @test_vlseg7ff_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg5e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg7e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i8( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv2i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8: +define @test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i8(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i8(,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) -define @test_vlseg6ff_nxv2i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv2i8: +define @test_vlseg7ff_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg6e8ff.v v7, (a0) +; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg7e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv2i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8: +define @test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i8(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i8(,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg7ff_nxv2i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv2i8: +define @test_vlseg8ff_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg7e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i8( undef, undef, undef, undef, undef, 
undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv2i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8: +define @test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i8(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i8(,,,,,,,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) -define @test_vlseg8ff_nxv2i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv2i8: +define @test_vlseg8ff_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg8e8ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail 
call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv2i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8: +define @test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i32(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv8i32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv8i32: +define @test_vlseg2ff_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg2e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue 
{target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv8i32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32: +define @test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv32i8(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) -define @test_vlseg2ff_nxv32i8(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv32i8: +define @test_vlseg2ff_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vlseg2e8ff.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg2e64ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} 
@llvm.riscv.vlseg2ff.nxv32i8( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv32i8( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8: +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) + +define @test_vlseg2ff_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4i64_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vlseg2e64ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vlseg2e8ff.v v4, (a0), v0.t +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) + +define @test_vlseg3ff_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1i64_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg3e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 
3) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) + +define @test_vlseg3ff_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2i64_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg3e64ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) + 
+define @test_vlseg4ff_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1i64_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg4e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) + +define @test_vlseg4ff_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2i64_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg4e64ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i64} 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) + +define @test_vlseg5ff_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1i64_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg5e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) + +define @test_vlseg6ff_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1i64_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg6e64ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define 
<vscale x 1 x i64> @test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
+; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    vmv1r.v v9, v10
+; CHECK-NEXT:    vmv1r.v v10, v11
+; CHECK-NEXT:    vmv1r.v v11, v12
+; CHECK-NEXT:    vmv1r.v v12, v13
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vlseg6e64ff.v v7, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 6)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 6), i64} %0, 0
+  %2 = call <vscale x 1 x i64> @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 6), i64} %0, 1
+  store i64 %3, ptr %outvl
+  ret <vscale x 1 x i64> %2
+}
+
+declare {target("riscv.vector.tuple", <vscale x 8 x i8>, 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i1>, i64, i64, i64)
+
+define <vscale x 1 x i64> @test_vlseg7ff_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) {
+; CHECK-LABEL: test_vlseg7ff_nxv1i64_triscv.vector.tuple_nxv8i8_7t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vlseg7e64ff.v v7, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) undef, ptr %base, i64 %vl, i64 6)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 7), i64} %0, 0
+  %2 = call <vscale x 1 x i64> @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 7), i64} %0, 1
+  store i64 %3, ptr %outvl
+  ret <vscale x 1 x i64> %2
+}
+
+define <vscale x 1 x i64> @test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
+; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    vmv1r.v v9, v10
+; CHECK-NEXT:    vmv1r.v v10, v11
+; CHECK-NEXT:    vmv1r.v v11, v12
+; CHECK-NEXT:    vmv1r.v v12, v13
+; CHECK-NEXT:    vmv1r.v v13, v14
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vlseg7e64ff.v v7, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 6)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 7), i64} %0, 0
+  %2 = call <vscale x 1 x i64> @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 7), i64} %0, 1
+  store i64 %3, ptr %outvl
+  ret <vscale x 1 x i64> %2
+}
+
+declare {target("riscv.vector.tuple", <vscale x 8 x i8>, 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i1>, i64, i64, i64)
+
+define <vscale x 1 x i64> @test_vlseg8ff_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) {
+; CHECK-LABEL:
test_vlseg8ff_nxv1i64_triscv.vector.tuple_nxv8i8_8t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vlseg8e64ff.v v7, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) undef, ptr %base, i64 %vl, i64 6)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 8), i64} %0, 0
+  %2 = call <vscale x 1 x i64> @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 8), i64} %0, 1
+  store i64 %3, ptr %outvl
+  ret <vscale x 1 x i64> %2
+}
+
+define <vscale x 1 x i64> @test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
+; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    vmv1r.v v9, v10
+; CHECK-NEXT:    vmv1r.v v10, v11
+; CHECK-NEXT:    vmv1r.v v11, v12
+; CHECK-NEXT:    vmv1r.v v12, v13
+; CHECK-NEXT:    vmv1r.v v13, v14
+; CHECK-NEXT:    vmv1r.v v14, v15
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vlseg8e64ff.v v7, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 6)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 8), i64} %0, 0
+  %2 = call <vscale x 1 x i64> @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 8), i64} %0, 1
+  store i64 %3, ptr %outvl
+  ret <vscale x 1 x i64> %2
+}
+
+
+define <vscale x 1 x half> @test_vlseg2ff_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, ptr %outvl) {
+; CHECK-LABEL: test_vlseg2ff_nxv1f16_triscv.vector.tuple_nxv2i8_2t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vlseg2e16ff.v v7, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) undef, ptr %base, i64 %vl, i64 4)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64} %0, 0
+  %2 = call <vscale x 1 x half> @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64} %0, 1
+  store i64 %3, ptr %outvl
+  ret <vscale x 1 x half> %2
+}
+
+define <vscale x 1 x half> @test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
+; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vlseg2e16ff.v v7, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64} %0, 0
+  %2 = call <vscale x 1 x half>
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg2ff_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2f16_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg2ff_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4f16_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, 
i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg2ff_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8f16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg2ff_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv16f16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v4, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 2), i64} 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64} %0, 0
+  %2 = call <vscale x 16 x half> @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64} %0, 1
+  store i64 %3, ptr %outvl
+  ret <vscale x 16 x half> %2
+}
+
+
+define <vscale x 1 x half> @test_vlseg3ff_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, ptr %outvl) {
+; CHECK-LABEL: test_vlseg3ff_nxv1f16_triscv.vector.tuple_nxv2i8_3t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) undef, ptr %base, i64 %vl, i64 4)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64} %0, 0
+  %2 = call <vscale x 1 x half> @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64} %0, 1
+  store i64 %3, ptr %outvl
+  ret <vscale x 1 x half> %2
+}
+
+define <vscale x 1 x half> @test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
+; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    vmv1r.v v9, v10
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64} %0, 0
+  %2 = call <vscale x 1 x half> @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64} %0, 1
+  store i64 %3, ptr %outvl
+  ret <vscale x 1 x half> %2
+}
+
+
+define <vscale x 2 x half> @test_vlseg3ff_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) {
+; CHECK-LABEL: test_vlseg3ff_nxv2f16_triscv.vector.tuple_nxv4i8_3t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) undef, ptr %base, i64 %vl, i64 4)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64} %0, 0
+  %2 = call <vscale x 2 x half> @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64} %0, 1
+  store i64 %3, ptr %outvl
+  ret <vscale x 2 x half> %2
+}
+
+define <vscale x 2 x half> @test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
+; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    vmv1r.v v9, v10
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64} %0, 0
+  %2 = call <vscale x 2 x half> @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64} %0, 1
+  store i64 %3, ptr %outvl
+  ret <vscale x 2 x half> %2
+}
+
+
+define <vscale x 4 x half> @test_vlseg3ff_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) {
+; CHECK-LABEL: test_vlseg3ff_nxv4f16_triscv.vector.tuple_nxv8i8_3t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) undef, ptr %base, i64 %vl, i64 4)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64} %0, 0
+  %2 = call <vscale x 4 x half> @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64} %0, 1
+  store i64 %3, ptr %outvl
+  ret <vscale x 4 x half> %2
+}
+
+define <vscale x 4 x half> @test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
+; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    vmv1r.v v9, v10
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64} %0, 0
+  %2 = call <vscale x 4 x half> @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64} %0, 1
+  store i64 %3, ptr %outvl
+  ret <vscale x 4 x half> %2
+}
+
+
+define <vscale x 8 x half> @test_vlseg3ff_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) {
+; CHECK-LABEL: test_vlseg3ff_nxv8f16_triscv.vector.tuple_nxv16i8_3t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vlseg3e16ff.v v6, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) undef, ptr %base, i64 %vl, i64 4)
+  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64} %0, 0
+  %2 = call <vscale x 8 x half> @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %1, i32 1)
+  %3 = extractvalue {target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64} %0, 1
+  store i64 %3, ptr %outvl
+  ret <vscale x 8 x half> %2
+}
+
+define <vscale x 8 x half> @test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
+; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v
v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg4ff_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1f16_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg4ff_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2f16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define 
@test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg4ff_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4f16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg4ff_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv8f16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v6, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg5ff_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1f16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i16(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, ptr, , i64, i64) -define 
@test_vlseg2ff_nxv2i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv2i16: +define @test_vlseg5ff_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv2i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16: +define @test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i16(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i16(,,, ptr, , i64, i64) -define @test_vlseg3ff_nxv2i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv2i16: +define @test_vlseg5ff_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), 
i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv2i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16: +define @test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i16( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i16(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i16(,,,, ptr, , i64, i64) -define @test_vlseg4ff_nxv2i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv2i16: +define @test_vlseg6ff_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv2i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16: +define 
@test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i16(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i16(,,,,, ptr, , i64, i64) -define @test_vlseg5ff_nxv2i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv2i16: +define @test_vlseg6ff_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv2i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16: +define @test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg5e16ff.v v7, 
(a0), v0.t +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i16(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i16(,,,,,, ptr, , i64, i64) -define @test_vlseg6ff_nxv2i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv2i16: +define @test_vlseg6ff_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv2i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16: +define @test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr 
%base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i16(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i16(,,,,,,, ptr, , i64, i64) -define @test_vlseg7ff_nxv2i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv2i16: +define @test_vlseg7ff_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv2i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16: +define @test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i16(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} 
@llvm.riscv.vlseg8ff.mask.nxv2i16(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8ff_nxv2i16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv2i16: +define @test_vlseg7ff_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv2i16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16: +define @test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i64(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(,, ptr, , i64, i64) -define @test_vlseg2ff_nxv2i64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv2i64: +define @test_vlseg7ff_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg2e64ff.v v6, 
(a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv2i64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv2i64: +define @test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i64(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i64(,,, ptr, , i64, i64) -define @test_vlseg3ff_nxv2i64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv2i64: +define @test_vlseg8ff_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg3e64ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i64( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 
8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv2i64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64: +define @test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i64( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i64(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i64(,,,, ptr, , i64, i64) -define @test_vlseg4ff_nxv2i64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv2i64: +define @test_vlseg8ff_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg4e64ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i64( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv2i64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64: +define @test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr 
%outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i64( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv16f16(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16f16(,, ptr, , i64, i64) -define @test_vlseg2ff_nxv16f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv16f16: +define @test_vlseg8ff_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16f16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv16f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16: +define @test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16f16( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f64(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f64(,, ptr, , i64, i64) -define @test_vlseg2ff_nxv4f64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv4f64: +define @test_vlseg2ff_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vlseg2e64ff.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv4f64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64: +define @test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f64( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 
1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1f64(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f64(,, ptr, , i64, i64) -define @test_vlseg2ff_nxv1f64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv1f64: +define @test_vlseg2ff_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg2e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1f64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv1f64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64: +define @test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f64( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1f64(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f64(,,, ptr, , i64, i64) -define @test_vlseg3ff_nxv1f64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv1f64: +define @test_vlseg2ff_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg3e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = 
tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1f64( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv1f64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64: +define @test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f64( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f64(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f64(,,,, ptr, , i64, i64) -define @test_vlseg4ff_nxv1f64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv1f64: +define @test_vlseg2ff_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg4e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vlseg2e32ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f64( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define 
@test_vlseg4ff_mask_nxv1f64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64: +define @test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f64( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f64(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f64(,,,,, ptr, , i64, i64) -define @test_vlseg5ff_nxv1f64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv1f64: +define @test_vlseg3ff_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg5e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f64( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv1f64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64: +define @test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f64( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f64(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f64(,,,,,, ptr, , i64, i64) -define @test_vlseg6ff_nxv1f64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv1f64: +define @test_vlseg3ff_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg6e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f64( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv1f64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64: +define @test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f64(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f64(,,,,,,, ptr, , i64, i64) -define @test_vlseg7ff_nxv1f64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv1f64: +define @test_vlseg3ff_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg7e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg3e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f64( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv1f64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64: +define @test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} 
@llvm.riscv.vlseg8ff.nxv1f64(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f64(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8ff_nxv1f64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv1f64: +define @test_vlseg4ff_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg8e64ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv1f64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64: +define @test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2f32(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f32(,, ptr, , i64, i64) -define @test_vlseg2ff_nxv2f32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv2f32: +define @test_vlseg4ff_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v7, (a0) +; CHECK-NEXT: vlseg4e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv2f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32: +define @test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f32( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f32(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f32(,,, ptr, , i64, i64) -define @test_vlseg3ff_nxv2f32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv2f32: +define @test_vlseg4ff_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg3e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vlseg4e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = 
extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv2f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32: +define @test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f32( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f32(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f32(,,,, ptr, , i64, i64) -define @test_vlseg4ff_nxv2f32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv2f32: +define @test_vlseg5ff_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg4e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg5e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv2f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32: +define @test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: 
vlseg4e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f32( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f32(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f32(,,,,, ptr, , i64, i64) -define @test_vlseg5ff_nxv2f32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv2f32: +define @test_vlseg5ff_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg5e32ff.v v7, (a0) @@ -3788,38 +4944,80 @@ define @test_vlseg5ff_nxv2f32(ptr %base, i64 %vl, ptr %outv ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f32( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv2f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32: +define @test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f32( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + + +define @test_vlseg6ff_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1f32_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 +} + +define @test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret +entry: + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f32(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f32(,,,,,, ptr, , i64, i64) -define @test_vlseg6ff_nxv2f32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv2f32: +define @test_vlseg6ff_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg6e32ff.v v7, (a0) @@ -3827,1389 +5025,1419 @@ define @test_vlseg6ff_nxv2f32(ptr %base, i64 %vl, ptr %outv ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv2f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32: +define @test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f32(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f32(,,,,,,, ptr, , i64, i64) -define @test_vlseg7ff_nxv2f32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv2f32: +define @test_vlseg7ff_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg7e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv2f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32: +define @test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f32(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f32(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8ff_nxv2f32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv2f32: +define @test_vlseg7ff_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: vlseg7e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv2f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32: +define @test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vsetvli zero, a1, 
e32, m1, ta, mu -; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1f16(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f16(,, ptr, , i64, i64) -define @test_vlseg2ff_nxv1f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv1f16: +define @test_vlseg8ff_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1f16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv1f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16: +define @test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f16( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) 
%val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1f16(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f16(,,, ptr, , i64, i64) -define @test_vlseg3ff_nxv1f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv1f16: +define @test_vlseg8ff_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg8e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1f16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv1f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16: +define @test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f16( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f16(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f16(,,,, ptr, , i64, i64) -define @test_vlseg4ff_nxv1f16(ptr %base, i64 %vl, ptr %outvl) { -; 
CHECK-LABEL: test_vlseg4ff_nxv1f16: +define @test_vlseg2ff_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg2e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv1f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16: +define @test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f16(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f16(,,,,, ptr, , i64, i64) -define @test_vlseg5ff_nxv1f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv1f16: +define @test_vlseg2ff_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg2e64ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, 
ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv1f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16: +define @test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f16( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f16(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f16(,,,,,, ptr, , i64, i64) -define @test_vlseg6ff_nxv1f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv1f16: +define @test_vlseg2ff_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vlseg2e64ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv1f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; 
CHECK-LABEL: test_vlseg6ff_mask_nxv1f16: +define @test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv4r.v v4, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f16(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f16(,,,,,,, ptr, , i64, i64) -define @test_vlseg7ff_nxv1f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv1f16: +define @test_vlseg3ff_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg3e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv1f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16: +define @test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; 
CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f16(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f16(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8ff_nxv1f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +define @test_vlseg3ff_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2f64_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vlseg3e64ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv1f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16: +define @test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, 
%val, ptr %base, %mask, i64 %vl, i64 1)
- %1 = extractvalue {,,,,,,,, i64} %0, 1
- %2 = extractvalue {,,,,,,,, i64} %0, 8
- store i64 %2, ptr %outvl
- ret %1
+ %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6)
+ %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0
+ %2 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1)
+ %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1
+ store i64 %3, ptr %outvl
+ ret %2
 }
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv1f32(,, ptr , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f32(,, ptr, , i64, i64)
-define @test_vlseg2ff_nxv1f32(ptr %base, i64 %vl, ptr %outvl) {
-; CHECK-LABEL: test_vlseg2ff_nxv1f32:
+define @test_vlseg4ff_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) {
+; CHECK-LABEL: test_vlseg4ff_nxv1f64_triscv.vector.tuple_nxv8i8_4t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vlseg4e64ff.v v7, (a0)
 ; CHECK-NEXT: csrr a0, vl
 ; CHECK-NEXT: sd a0, 0(a2)
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1f32( undef, undef, ptr %base, i64 %vl)
- %1 = extractvalue {,, i64} %0, 1
- %2 = extractvalue {,, i64} %0, 2
- store i64 %2, ptr %outvl
- ret %1
+ %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 6)
+ %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0
+ %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1)
+ %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1
+ store i64 %3, ptr %outvl
+ ret %2
 }
-define @test_vlseg2ff_mask_nxv1f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
-; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32:
+define @test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
+; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vmv1r.v v9, v10
+; CHECK-NEXT: vmv1r.v v10, v11
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT: csrr a0, vl
 ; CHECK-NEXT: sd a0, 0(a2)
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f32( %val, %val, ptr %base, %mask, i64 %vl, i64 1)
- %1 = extractvalue {,, i64} %0, 1
- %2 = extractvalue {,, i64} %0, 2
- store i64 %2, ptr %outvl
- ret %1
+ %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6)
+ %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0
+ %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1)
+ %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1
+ store i64 %3, ptr %outvl
+ ret %2
 }
-declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1f32(,,, ptr , i64)
-declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f32(,,, ptr, , i64, i64)
-define @test_vlseg3ff_nxv1f32(ptr %base, i64 %vl, ptr %outvl) {
-; CHECK-LABEL: test_vlseg3ff_nxv1f32:
+define @test_vlseg4ff_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) {
+; CHECK-LABEL: test_vlseg4ff_nxv2f64_triscv.vector.tuple_nxv16i8_4t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vlseg4e64ff.v v6, (a0)
 ; CHECK-NEXT: csrr a0, vl
 ; CHECK-NEXT: sd a0, 0(a2)
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1f32( undef, undef, undef, ptr %base, i64 %vl)
- %1 = extractvalue {,,, i64} %0, 1
- %2 = extractvalue {,,, i64} %0, 3
- store i64 %2, ptr %outvl
- ret %1
+ %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 6)
+ %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0
+ %2 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1)
+ %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1
+ store i64 %3, ptr %outvl
+ ret %2
 }
-define @test_vlseg3ff_mask_nxv1f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
-; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32:
+define @test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
+; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv2r.v v10, v12
+; CHECK-NEXT: vmv2r.v v12, v14
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t
 ; CHECK-NEXT: csrr a0, vl
 ; CHECK-NEXT: sd a0, 0(a2)
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f32( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1)
- %1 = extractvalue {,,, i64} %0, 1
- %2 = extractvalue {,,, i64} %0, 3
- store i64 %2, ptr %outvl
- ret %1
+ %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6)
+ %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0
+ %2 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1)
+ %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1
+ store i64 %3, ptr %outvl
+ ret %2
 }
-declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f32(,,,, ptr , i64)
-declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f32(,,,, ptr, , i64, i64)
-define @test_vlseg4ff_nxv1f32(ptr %base, i64 %vl, ptr %outvl) {
-; CHECK-LABEL: test_vlseg4ff_nxv1f32:
+define @test_vlseg5ff_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) {
+; CHECK-LABEL: test_vlseg5ff_nxv1f64_triscv.vector.tuple_nxv8i8_5t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vlseg5e64ff.v v7, (a0)
 ; CHECK-NEXT: csrr a0, vl
 ; CHECK-NEXT: sd a0, 0(a2)
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f32( undef, undef, undef, undef, ptr %base, i64 %vl)
- %1 = extractvalue {,,,, i64} %0, 1
- %2 = extractvalue {,,,, i64} %0, 4
- store i64 %2, ptr %outvl
- ret %1
+ %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 6)
+ %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0
+ %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1)
+ %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1
+ store i64 %3, ptr %outvl
+ ret %2
 }
-define @test_vlseg4ff_mask_nxv1f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
-; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32:
+define @test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
+; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vmv1r.v v9, v10
+; CHECK-NEXT: vmv1r.v v10, v11
+; CHECK-NEXT: vmv1r.v v11, v12
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT: csrr a0, vl
 ; CHECK-NEXT: sd a0, 0(a2)
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f32( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1)
- %1 = extractvalue {,,,, i64} %0, 1
- %2 = extractvalue {,,,, i64} %0, 4
- store i64 %2, ptr %outvl
- ret %1
+ %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6)
+ %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0
+ %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1)
+ %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1
+ store i64 %3, ptr %outvl
+ ret %2
 }
-declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f32(,,,,, ptr , i64)
-declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f32(,,,,, ptr, , i64, i64)
-define @test_vlseg5ff_nxv1f32(ptr %base, i64 %vl, ptr %outvl) {
-; CHECK-LABEL: test_vlseg5ff_nxv1f32:
+define @test_vlseg6ff_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) {
+; CHECK-LABEL: test_vlseg6ff_nxv1f64_triscv.vector.tuple_nxv8i8_6t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vlseg6e64ff.v v7, (a0)
 ; CHECK-NEXT: csrr a0, vl
 ; CHECK-NEXT: sd a0, 0(a2)
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f32( undef, undef, undef, undef, undef, ptr %base, i64 %vl)
- %1 = extractvalue {,,,,, i64} %0, 1
- %2 = extractvalue {,,,,, i64} %0, 5
- store i64 %2, ptr %outvl
- ret %1
+ %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 6)
+ %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0
+ %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1)
+ %3 = extractvalue
{target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv1f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32: +define @test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f32( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f32(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f32(,,,,,, ptr, , i64, i64) -define @test_vlseg6ff_nxv1f32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv1f32: +define @test_vlseg7ff_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg6e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg7e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv1f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32: +define @test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; 
CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f32(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f32(,,,,,,, ptr, , i64, i64) -define @test_vlseg7ff_nxv1f32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv1f32: +define @test_vlseg8ff_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg7e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlseg8e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv1f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32: +define @test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; 
CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 6) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f32(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f32(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8ff_nxv1f32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv1f32: +define @test_vlseg2ff_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg8e32ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv1f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32: +define @test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 
%2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv8f16(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f16(,, ptr, , i64, i64) -define @test_vlseg2ff_nxv8f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv8f16: +define @test_vlseg2ff_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8f16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv8f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16: +define @test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f16( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8f16(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8f16(,,, ptr, , i64, i64) -define @test_vlseg3ff_nxv8f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv8f16: 
+define @test_vlseg2ff_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8f16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv8f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16: +define @test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8f16( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8f16(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8f16(,,,, ptr, , i64, i64) -define @test_vlseg4ff_nxv8f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv8f16: +define @test_vlseg2ff_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg4e16ff.v v6, (a0) +; CHECK-NEXT: vlseg2e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8f16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv8f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16: +define @test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t +; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8f16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv8f32(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f32(,, ptr, , i64, i64) -define @test_vlseg2ff_nxv8f32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv8f32: +define @test_vlseg2ff_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg2ff_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v4, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vlseg2e16ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8f32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv8f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32: +define @test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: 
test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f32( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2f64(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f64(,, ptr, , i64, i64) -define @test_vlseg2ff_nxv2f64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv2f64: +define @test_vlseg3ff_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg2e64ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f64( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv2f64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64: +define @test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f64( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f64(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f64(,,, ptr, , i64, i64) -define @test_vlseg3ff_nxv2f64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv2f64: +define @test_vlseg3ff_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg3e64ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f64( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv2f64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64: +define @test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f64( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f64(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f64(,,,, ptr, , i64, i64) -define @test_vlseg4ff_nxv2f64(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: 
test_vlseg4ff_nxv2f64: +define @test_vlseg3ff_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg4e64ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f64( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv2f64( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64: +define @test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f64( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f16(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f16(,, ptr, , i64, i64) -define @test_vlseg2ff_nxv4f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv4f16: +define @test_vlseg3ff_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - 
store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv4f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16: +define @test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f16( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 3), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4f16(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f16(,,, ptr, , i64, i64) -define @test_vlseg3ff_nxv4f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv4f16: +define @test_vlseg4ff_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4f16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv4f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16: +define 
@test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f16( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f16(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f16(,,,, ptr, , i64, i64) -define @test_vlseg4ff_nxv4f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv4f16: +define @test_vlseg4ff_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv4f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16: +define @test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} 
@llvm.riscv.vlseg4ff.mask.nxv4f16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4f16(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4f16(,,,,, ptr, , i64, i64) -define @test_vlseg5ff_nxv4f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv4f16: +define @test_vlseg4ff_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4f16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv4f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16: +define @test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4f16( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store 
i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4f16(,,,,,, ptr , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4f16(,,,,,, ptr, , i64, i64) -define @test_vlseg6ff_nxv4f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv4f16: +define @test_vlseg4ff_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vlseg4e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv4f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16: +define @test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 4), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4f16(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4f16(,,,,,,, ptr, , i64, i64) -define @test_vlseg7ff_nxv4f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv4f16: +define @test_vlseg5ff_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg7e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv4f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16: +define @test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4f16(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4f16(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8ff_nxv4f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv4f16: +define @test_vlseg5ff_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue 
{,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv4f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16: +define @test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2f16(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f16(,, ptr, , i64, i64) -define @test_vlseg2ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv2f16: +define @test_vlseg5ff_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f16( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, 
i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv2f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16: +define @test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f16( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 5), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f16(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f16(,,, ptr, , i64, i64) -define @test_vlseg3ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv2f16: +define @test_vlseg6ff_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg3e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f16( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv2f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16: +define @test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; 
CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f16( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f16(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f16(,,,, ptr, , i64, i64) -define @test_vlseg4ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv2f16: +define @test_vlseg6ff_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg4e16ff.v v7, (a0) +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f16( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv2f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16: +define @test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f16(,,,,, ptr , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f16(,,,,, ptr, , i64, i64) -define @test_vlseg5ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_nxv2f16: +define @test_vlseg6ff_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg5e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f16( undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg5ff_mask_nxv2f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16: +define @test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f16( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,, i64} %0, 1 - %2 = extractvalue {,,,,, i64} %0, 5 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 6), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f16(,,,,,, ptr , 
i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f16(,,,,,, ptr, , i64, i64) -define @test_vlseg6ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_nxv2f16: +define @test_vlseg7ff_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg6e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg6ff_mask_nxv2f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16: +define @test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,, i64} %0, 6 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f16(,,,,,,, ptr , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f16(,,,,,,, ptr, , i64, i64) -define @test_vlseg7ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_nxv2f16: +define @test_vlseg7ff_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, 
e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) @@ -5217,184 +6445,206 @@ define @test_vlseg7ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg7ff_mask_nxv2f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16: +define @test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,, i64} %0, 7 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f16(,,,,,,,, ptr , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f16(,,,,,,,, ptr, , i64, i64) -define @test_vlseg8ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_nxv2f16: +define @test_vlseg7ff_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg8e16ff.v v7, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 
= tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg8ff_mask_nxv2f16( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16: +define @test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,, i64} %0, 1 - %2 = extractvalue {,,,,,,,, i64} %0, 8 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 7), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f32(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f32(,, ptr, , i64, i64) -define @test_vlseg2ff_nxv4f32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_nxv4f32: +define @test_vlseg8ff_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg2e32ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f32( undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue 
{target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv4f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32: +define @test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f32( %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4f32(,,, ptr , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f32(,,, ptr, , i64, i64) -define @test_vlseg3ff_nxv4f32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_nxv4f32: +define @test_vlseg8ff_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg3e32ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4f32( undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg3ff_mask_nxv4f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32: +define @test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, 
e32, m2, ta, mu -; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f32( %val, %val, %val, ptr %base, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,, i64} %0, 1 - %2 = extractvalue {,,, i64} %0, 3 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f32(,,,, ptr , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f32(,,,, ptr, , i64, i64) -define @test_vlseg4ff_nxv4f32(ptr %base, i64 %vl, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_nxv4f32: +define @test_vlseg8ff_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg4e32ff.v v6, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f32( undef, undef, undef, undef, ptr %base, i64 %vl) - %1 = extractvalue {,,,, i64} %0, 1 - %2 = extractvalue {,,,, i64} %0, 4 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %1, i32 1) + %3 = extractvalue {target("riscv.vector.tuple", , 8), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg4ff_mask_nxv4f32( %val, ptr %base, i64 %vl, %mask, ptr %outvl) { -; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32: +define @test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { +; CHECK-LABEL: test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v9, v10 +; CHECK-NEXT: vmv1r.v v10, v11 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: 
ret
 entry:
- %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg4ff.mask.nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} %0, 1
- %2 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} %0, 4
- store i64 %2, ptr %outvl
- ret <vscale x 4 x float> %1
+ %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1, i64 4)
+ %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 8), i64} %0, 0
+ %2 = call <vscale x 4 x bfloat> @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %1, i32 1)
+ %3 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 8), i64} %0, 1
+ store i64 %3, ptr %outvl
+ ret <vscale x 4 x bfloat> %2
 }
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll
index 8341306efe92f..4b475dd96e00e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll
@@ -1,4335 +1,4247 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh \
+; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \
 ; RUN: -verify-machineinstrs < %s | FileCheck %s
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, i32, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, i32, <vscale x 16 x i1>, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, i32, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, i32, <vscale x 1 x i1>, i32, i32, i32)
-define <vscale x 16 x i16> @test_vlsseg2_nxv16i16(ptr %base, i32 %offset, i32 %vl) {
-; CHECK-LABEL: test_vlsseg2_nxv16i16:
+define <vscale x 1 x i8> @test_vlsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
+; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %offset, i32 %vl)
- %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
- ret <vscale x 16 x i16> %1
+ %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) undef, ptr %base, i32 %offset, i32 %vl, i32 3)
+ %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %0, i32 1)
+ ret <vscale x 1 x i8> %1
 }
-define <vscale x 16 x i16> @test_vlsseg2_mask_nxv16i16(ptr %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vlsseg2_mask_nxv16i16:
+define <vscale x 1 x i8> @test_vlsseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
-; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
-; CHECK-NEXT: vmv4r.v v8, v4
-; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
+; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %offset, i32 %vl)
- %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
- %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
- %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
- ret <vscale x 16 x i16> %3
+
%0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv1i8(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1i8(,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) -define @test_vlsseg2_nxv1i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv1i8: +define @test_vlsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv1i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv1i8: +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) + +define @test_vlsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr 
%base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i8( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv1i8(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i8(,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) -define @test_vlsseg3_nxv1i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv1i8: +define @test_vlsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv1i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv1i8: +define @test_vlsseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i8( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} 
@llvm.riscv.vlsseg4.nxv1i8(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8(,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) -define @test_vlsseg4_nxv1i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv1i8: +define @test_vlsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv1i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv1i8: +define @test_vlsseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1i8(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8(,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) -define @test_vlsseg5_nxv1i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv1i8: +define @test_vlsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vlsseg5e8.v v7, 
(a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv1i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv1i8: +define @test_vlsseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8(,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) -define @test_vlsseg6_nxv1i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv1i8: +define @test_vlsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 +; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vlsseg6_mask_nxv1i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv1i8: +define @test_vlsseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i32 %offset, i32 %vl, 
%mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(,,,,,,, ptr, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8(,,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) -define @test_vlsseg7_nxv1i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv1i8: +define @test_vlsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv1i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv1i8: +define @test_vlsseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue 
{,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8(,,,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) -define @test_vlsseg8_nxv1i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv1i8: +define @test_vlsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv1i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv1i8: +define @test_vlsseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv16i8(,, ptr, i32, i32) 
-declare {,} @llvm.riscv.vlsseg2.mask.nxv16i8(,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) -define @test_vlsseg2_nxv16i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv16i8: +define @test_vlsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma -; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv16i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv16i8: +define @test_vlsseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i8( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv16i8(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv16i8(,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) -define @test_vlsseg3_nxv16i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv16i8: +define @test_vlsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = 
tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vlsseg3_mask_nxv16i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv16i8: +define @test_vlsseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv16i8( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv16i8(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8(,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) -define @test_vlsseg4_nxv16i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv16i8: +define @test_vlsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma -; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv16i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv16i8: +define @test_vlsseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; 
CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv2i32(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2i32(,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) -define @test_vlsseg2_nxv2i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv2i32: +define @test_vlsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv2i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv2i32: +define @test_vlsseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i32( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv2i32(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i32(,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) -define @test_vlsseg3_nxv2i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv2i32: +define @test_vlsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv2i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv2i32: +define @test_vlsseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i32( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2i32(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32(,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) -define @test_vlsseg4_nxv2i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv2i32: +define @test_vlsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32( undef, undef, undef, undef, ptr %base, i32 %offset, i32 
%vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv2i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv2i32: +define @test_vlsseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2i32(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32(,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) -define @test_vlsseg5_nxv2i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv2i32: +define @test_vlsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv2i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv2i32: +define @test_vlsseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v 
v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32(,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) -define @test_vlsseg6_nxv2i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv2i32: +define @test_vlsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv2i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv2i32: +define @test_vlsseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(,,,,,,, ptr, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32(,,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) -define @test_vlsseg7_nxv2i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv2i32: +define @test_vlsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv2i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv2i32: +define @test_vlsseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32(,,,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) -define @test_vlsseg8_nxv2i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv2i32: +define @test_vlsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv2i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv2i32: +define @test_vlsseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv4i16(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4i16(,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) -define @test_vlsseg2_nxv4i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv4i16: +define @test_vlsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv4i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv4i16: +define @test_vlsseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i16( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv4i16(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i16(,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) -define @test_vlsseg3_nxv4i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv4i16: +define @test_vlsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv4i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv4i16: +define @test_vlsseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; 
CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i16( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4i16(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16(,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) -define @test_vlsseg4_nxv4i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv4i16: +define @test_vlsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv4i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv4i16: +define @test_vlsseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 
} -declare {,,,,} @llvm.riscv.vlsseg5.nxv4i16(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16(,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) -define @test_vlsseg5_nxv4i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv4i16: +define @test_vlsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv4i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv4i16: +define @test_vlsseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16(,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) -define @test_vlsseg6_nxv4i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv4i16: +define @test_vlsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv4i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv4i16: +define @test_vlsseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(,,,,,,, ptr, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16(,,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) -define @test_vlsseg7_nxv4i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv4i16: +define @test_vlsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlsseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) + +define @test_vlsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlsseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) + +define @test_vlsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlsseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 
%vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) + +define @test_vlsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlsseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) + +define @test_vlsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlsseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 
8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) + +define @test_vlsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlsseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) + +define @test_vlsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlsseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) + +define @test_vlsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlsseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) + +define @test_vlsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) + +define @test_vlsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) + +define @test_vlsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) + +define @test_vlsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", 
, 2), ptr, i32, , i32, i32, i32) + +define @test_vlsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) + +define @test_vlsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlsseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) + +define @test_vlsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlsseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) + +define @test_vlsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlsseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) + +define @test_vlsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlsseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, 
ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) + +define @test_vlsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vlsseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) + +define @test_vlsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vlsseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) + +define @test_vlsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: 
vlsseg4e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vlsseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) + +define @test_vlsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vlsseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) + +define @test_vlsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vlsseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, 
a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) + +define @test_vlsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vlsseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) + +define @test_vlsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vlsseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) + +define 
@test_vlsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vlsseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) + +define @test_vlsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vlsseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) + +define @test_vlsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} 
+ +define @test_vlsseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) + +define @test_vlsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlsseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) + +define @test_vlsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlsseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) + +define @test_vlsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlsseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) + +define @test_vlsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlsseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) + +define @test_vlsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail 
call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlsseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) + +define @test_vlsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlsseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) + +define @test_vlsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t +; 
CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) + +define @test_vlsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) + +define @test_vlsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv4i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv4i16: +define @test_vlsseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, 
v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16(,,,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) -define @test_vlsseg8_nxv4i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv4i16: +define @test_vlsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv4i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv4i16: +define @test_vlsseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 
1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv1i32(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1i32(,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) -define @test_vlsseg2_nxv1i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv1i32: +define @test_vlsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 +; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vlsseg2_mask_nxv1i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv1i32: +define @test_vlsseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i32( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv1i32(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i32(,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) -define @test_vlsseg3_nxv1i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv1i32: +define @test_vlsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv1i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv1i32: +define @test_vlsseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i32( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1i32(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32(,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) -define @test_vlsseg4_nxv1i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv1i32: +define @test_vlsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv1i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv1i32: +define @test_vlsseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, 
i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1i32(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32(,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) -define @test_vlsseg5_nxv1i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv1i32: +define @test_vlsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 +; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) ret %1 } -define @test_vlsseg5_mask_nxv1i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv1i32: +define @test_vlsseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32(,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) -define @test_vlsseg6_nxv1i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv1i32: +define @test_vlsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: 
test_vlsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv1i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv1i32: +define @test_vlsseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(,,,,,,, ptr, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32(,,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) -define @test_vlsseg7_nxv1i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv1i32: +define @test_vlsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv1i32(ptr %base, i32 
%offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv1i32: +define @test_vlsseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32(,,,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) -define @test_vlsseg8_nxv1i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv1i32: +define @test_vlsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vlsseg8_mask_nxv1i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv1i32: +define @test_vlsseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32( undef, undef , 
undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv8i16(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8i16(,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) -define @test_vlsseg2_nxv8i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv8i16: +define @test_vlsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv8i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv8i16: +define @test_vlsseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu -; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i16( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv8i16(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i16(,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) -define @test_vlsseg3_nxv8i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv8i16: +define 
@test_vlsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv8i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv8i16: +define @test_vlsseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu -; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i16( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv8i16(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16(,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) -define @test_vlsseg4_nxv8i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv8i16: +define @test_vlsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv8i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv8i16: +define 
@test_vlsseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu -; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv8i8(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8i8(,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) -define @test_vlsseg2_nxv8i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv8i8: +define @test_vlsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv8i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv8i8: +define @test_vlsseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i8( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv8i8(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i8(,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) -define @test_vlsseg3_nxv8i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv8i8: +define @test_vlsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv8i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv8i8: +define @test_vlsseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i8( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv8i8(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8(,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) -define @test_vlsseg4_nxv8i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv8i8: +define @test_vlsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 
= extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv8i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv8i8: +define @test_vlsseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv8i8(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8(,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) -define @test_vlsseg5_nxv8i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv8i8: +define @test_vlsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv8i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv8i8: +define @test_vlsseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, 
a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8(,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) -define @test_vlsseg6_nxv8i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv8i8: +define @test_vlsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv8i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv8i8: +define @test_vlsseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(,,,,,,, ptr, i32, i32) -declare {,,,,,,} 
@llvm.riscv.vlsseg7.mask.nxv8i8(,,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) -define @test_vlsseg7_nxv8i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv8i8: +define @test_vlsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv8i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv8i8: +define @test_vlsseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8(,,,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) -define @test_vlsseg8_nxv8i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv8i8: +define @test_vlsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8( 
undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv8i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv8i8: +define @test_vlsseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv8i32(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8i32(,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) -define @test_vlsseg2_nxv8i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv8i32: +define @test_vlsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv8i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv8i32: +define @test_vlsseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a2, e32, m4, ta, mu -; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i32( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv4i8(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4i8(,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) -define @test_vlsseg2_nxv4i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv4i8: +define @test_vlsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv4i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv4i8: +define @test_vlsseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i8( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv4i8(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i8(,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) -define @test_vlsseg3_nxv4i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv4i8: +define @test_vlsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv4i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv4i8: +define @test_vlsseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i8( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4i8(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8(,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) -define @test_vlsseg4_nxv4i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv4i8: +define @test_vlsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv4i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv4i8: +define @test_vlsseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv4i8(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8(,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) -define @test_vlsseg5_nxv4i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv4i8: +define @test_vlsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv4i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv4i8: +define @test_vlsseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = 
extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8(,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) -define @test_vlsseg6_nxv4i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv4i8: +define @test_vlsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv4i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv4i8: +define @test_vlsseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(,,,,,,, ptr, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8(,,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) -define 
@test_vlsseg7_nxv4i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv4i8: +define @test_vlsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv4i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv4i8: +define @test_vlsseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8(,,,,,,,, ptr, i32, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) -define @test_vlsseg8_nxv4i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv4i8: +define @test_vlsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv4i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv4i8: +define @test_vlsseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv1i16(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1i16(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv1i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv1i16: +define @test_vlsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv1i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv1i16: +define @test_vlsseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i16( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, 
i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv1i16(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i16(,,, ptr, i32, , i32, i32) -define @test_vlsseg3_nxv1i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv1i16: +define @test_vlsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv1i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv1i16: +define @test_vlsseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i16( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1i16(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16(,,,, ptr, i32, , i32, i32) -define @test_vlsseg4_nxv1i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv1i16: +define @test_vlsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = 
tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv1i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv1i16: +define @test_vlsseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1i16(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16(,,,,, ptr, i32, , i32, i32) -define @test_vlsseg5_nxv1i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv1i16: +define @test_vlsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv1i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv1i16: +define @test_vlsseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16( undef, undef, undef, undef, undef, ptr 
%base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16(,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg6_nxv1i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv1i16: +define @test_vlsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv1i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv1i16: +define @test_vlsseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(,,,,,,, ptr, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16(,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg7_nxv1i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv1i16: +define @test_vlsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv1i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv1i16: +define @test_vlsseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16(,,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg8_nxv1i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv1i16: +define @test_vlsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv1i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv1i16: +define @test_vlsseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vlsseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv32i8(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv32i8(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv32i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv32i8: +define @test_vlsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma -; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv32i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv32i8: +define @test_vlsseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu -; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv32i8( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, 
i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv2i8(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2i8(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv2i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv2i8: +define @test_vlsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv2i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv2i8: +define @test_vlsseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i8( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv2i8(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i8(,,, ptr, i32, , i32, i32) -define @test_vlsseg3_nxv2i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv2i8: +define @test_vlsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv2i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv2i8: +define 
@test_vlsseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i8( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2i8(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8(,,,, ptr, i32, , i32, i32) -define @test_vlsseg4_nxv2i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv2i8: +define @test_vlsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv2i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv2i8: +define @test_vlsseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + 
ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2i8(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8(,,,,, ptr, i32, , i32, i32) -define @test_vlsseg5_nxv2i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv2i8: +define @test_vlsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv2i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv2i8: +define @test_vlsseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8(,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg6_nxv2i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv2i8: +define @test_vlsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv2i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv2i8: +define @test_vlsseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(,,,,,,, ptr, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8(,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg7_nxv2i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv2i8: +define @test_vlsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv2i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv2i8: +define @test_vlsseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 
%offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8(,,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg8_nxv2i8(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv2i8: +define @test_vlsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv2i8(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv2i8: +define @test_vlsseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv2i16(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2i16(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv2i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv2i16: +define @test_vlsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) 
{ +; CHECK-LABEL: test_vlsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv2i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv2i16: +define @test_vlsseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i16( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv2i16(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i16(,,, ptr, i32, , i32, i32) -define @test_vlsseg3_nxv2i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv2i16: +define @test_vlsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv2i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv2i16: +define @test_vlsseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, 
v7 -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i16( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2i16(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16(,,,, ptr, i32, , i32, i32) -define @test_vlsseg4_nxv2i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv2i16: +define @test_vlsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv2i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv2i16: +define @test_vlsseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2i16(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16(,,,,, ptr, i32, , i32, i32) -define @test_vlsseg5_nxv2i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv2i16: +define @test_vlsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: 
test_vlsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv2i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv2i16: +define @test_vlsseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16(,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg6_nxv2i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv2i16: +define @test_vlsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv2i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv2i16: +define @test_vlsseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vlsseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(,,,,,,, ptr, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16(,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg7_nxv2i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv2i16: +define @test_vlsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv2i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv2i16: +define @test_vlsseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16(,,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg8_nxv2i16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv2i16: +define @test_vlsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv2i16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv2i16: +define @test_vlsseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv4i32(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4i32(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv4i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv4i32: +define @test_vlsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 
+ %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv4i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv4i32: +define @test_vlsseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu -; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i32( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv4i32(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i32(,,, ptr, i32, , i32, i32) -define @test_vlsseg3_nxv4i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv4i32: +define @test_vlsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv4i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv4i32: +define @test_vlsseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu -; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i32( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = 
extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4i32(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32(,,,, ptr, i32, , i32, i32) -define @test_vlsseg4_nxv4i32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv4i32: +define @test_vlsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv4i32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv4i32: +define @test_vlsseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu -; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv16f16(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv16f16(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv16f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv16f16: +define @test_vlsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} 
%0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv16f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv16f16: +define @test_vlsseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu -; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16f16( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv4f64(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4f64(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv4f64(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv4f64: +define @test_vlsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv4f64(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv4f64: +define @test_vlsseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f64( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv1f64(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1f64(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv1f64(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv1f64: +define @test_vlsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv1f64(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv1f64: +define @test_vlsseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f64( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv1f64(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f64(,,, ptr, i32, , i32, i32) -define @test_vlsseg3_nxv1f64(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv1f64: +define @test_vlsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) 
undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv1f64(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv1f64: +define @test_vlsseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f64( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1f64(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64(,,,, ptr, i32, , i32, i32) -define @test_vlsseg4_nxv1f64(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv1f64: +define @test_vlsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv1f64(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv1f64: +define @test_vlsseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = 
tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1f64(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64(,,,,, ptr, i32, , i32, i32) -define @test_vlsseg5_nxv1f64(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv1f64: +define @test_vlsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv1f64(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv1f64: +define @test_vlsseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64(,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg6_nxv1f64(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv1f64: +define @test_vlsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64( undef, undef, undef, undef, undef, 
undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv1f64(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv1f64: +define @test_vlsseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(,,,,,,, ptr, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64(,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg7_nxv1f64(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv1f64: +define @test_vlsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv1f64(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv1f64: +define @test_vlsseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v 
v13, v7 -; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64(,,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg8_nxv1f64(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv1f64: +define @test_vlsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv1f64(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv1f64: +define @test_vlsseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} 
@llvm.riscv.vlsseg2.nxv2f32(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2f32(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv2f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv2f32: +define @test_vlsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv2f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv2f32: +define @test_vlsseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f32( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv2f32(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f32(,,, ptr, i32, , i32, i32) -define @test_vlsseg3_nxv2f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv2f32: +define @test_vlsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv2f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv2f32: +define 
@test_vlsseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f32( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2f32(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32(,,,, ptr, i32, , i32, i32) -define @test_vlsseg4_nxv2f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv2f32: +define @test_vlsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vlsseg4_mask_nxv2f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv2f32: +define @test_vlsseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} 
@llvm.riscv.vlsseg5.nxv2f32(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32(,,,,, ptr, i32, , i32, i32) -define @test_vlsseg5_nxv2f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv2f32: +define @test_vlsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv2f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv2f32: +define @test_vlsseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32(,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg6_nxv2f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv2f32: +define @test_vlsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vlsseg6_mask_nxv2f32(ptr %base, i32 
%offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv2f32: +define @test_vlsseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(,,,,,,, ptr, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32(,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg7_nxv2f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv2f32: +define @test_vlsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv2f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv2f32: +define @test_vlsseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32(,,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg8_nxv2f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv2f32: +define @test_vlsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 +; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vlsseg8_mask_nxv2f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv2f32: +define @test_vlsseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv1f16(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1f16(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv1f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv1f16: +define @test_vlsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vlsseg2.nxv1f16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv1f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv1f16: +define @test_vlsseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f16( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv1f16(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f16(,,, ptr, i32, , i32, i32) -define @test_vlsseg3_nxv1f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv1f16: +define @test_vlsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv1f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv1f16: +define @test_vlsseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call 
{,,} @llvm.riscv.vlsseg3.mask.nxv1f16( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1f16(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16(,,,, ptr, i32, , i32, i32) -define @test_vlsseg4_nxv1f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv1f16: +define @test_vlsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv1f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv1f16: +define @test_vlsseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1f16(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16(,,,,, ptr, i32, , i32, i32) -define @test_vlsseg5_nxv1f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv1f16: +define @test_vlsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 
= tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv1f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv1f16: +define @test_vlsseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16(,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg6_nxv1f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv1f16: +define @test_vlsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv1f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv1f16: +define @test_vlsseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 
-; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(,,,,,,, ptr, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16(,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg7_nxv1f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv1f16: +define @test_vlsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv1f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv1f16: +define @test_vlsseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} 
@llvm.riscv.vlsseg8.mask.nxv1f16(,,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg8_nxv1f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv1f16: +define @test_vlsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv1f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv1f16: +define @test_vlsseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv1f32(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1f32(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv1f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv1f32: +define @test_vlsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv1f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv1f32: +define @test_vlsseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f32( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv1f32(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f32(,,, ptr, i32, , i32, i32) -define @test_vlsseg3_nxv1f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv1f32: +define @test_vlsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv1f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv1f32: +define @test_vlsseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f32( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, 
i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1f32(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32(,,,, ptr, i32, , i32, i32) -define @test_vlsseg4_nxv1f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv1f32: +define @test_vlsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv1f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv1f32: +define @test_vlsseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1f32(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32(,,,,, ptr, i32, , i32, i32) -define @test_vlsseg5_nxv1f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv1f32: +define @test_vlsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr 
%base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv1f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv1f32: +define @test_vlsseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32(,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg6_nxv1f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv1f32: +define @test_vlsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv1f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv1f32: +define @test_vlsseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = 
extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(,,,,,,, ptr, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32(,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg7_nxv1f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv1f32: +define @test_vlsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv1f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv1f32: +define @test_vlsseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32(,,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg8_nxv1f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv1f32: +define @test_vlsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: 
test_vlsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv1f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv1f32: +define @test_vlsseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv8f16(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8f16(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv8f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv8f16: +define @test_vlsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv8f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv8f16: +define 
@test_vlsseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu -; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f16( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv8f16(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv8f16(,,, ptr, i32, , i32, i32) -define @test_vlsseg3_nxv8f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv8f16: +define @test_vlsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv8f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv8f16: +define @test_vlsseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu -; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8f16( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv8f16(,,,, ptr, i32, i32) -declare 
{,,,} @llvm.riscv.vlsseg4.mask.nxv8f16(,,,, ptr, i32, , i32, i32) -define @test_vlsseg4_nxv8f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv8f16: +define @test_vlsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 +; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv8f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv8f16: +define @test_vlsseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu -; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv8f32(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8f32(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv8f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv8f32: +define @test_vlsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv8f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv8f32: +define 
@test_vlsseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu -; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f32( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv2f64(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2f64(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv2f64(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv2f64: +define @test_vlsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv2f64(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv2f64: +define @test_vlsseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f64( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv2f64(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f64(,,, ptr, i32, , 
i32, i32) -define @test_vlsseg3_nxv2f64(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv2f64: +define @test_vlsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv2f64(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv2f64: +define @test_vlsseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f64( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2f64(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64(,,,, ptr, i32, , i32, i32) -define @test_vlsseg4_nxv2f64(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv2f64: +define @test_vlsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv2f64(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv2f64: +define @test_vlsseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr 
%base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv4f16(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4f16(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv4f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv4f16: +define @test_vlsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv4f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv4f16: +define @test_vlsseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f16( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv4f16(,,, ptr, i32, i32) -declare {,,} 
@llvm.riscv.vlsseg3.mask.nxv4f16(,,, ptr, i32, , i32, i32) -define @test_vlsseg3_nxv4f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv4f16: +define @test_vlsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv4f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv4f16: +define @test_vlsseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f16( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4f16(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16(,,,, ptr, i32, , i32, i32) -define @test_vlsseg4_nxv4f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv4f16: +define @test_vlsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv4f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv4f16: +define 
@test_vlsseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv4f16(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16(,,,,, ptr, i32, , i32, i32) -define @test_vlsseg5_nxv4f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv4f16: +define @test_vlsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv4f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv4f16: +define @test_vlsseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16(,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg6_nxv4f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv4f16: +define @test_vlsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv4f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv4f16: +define @test_vlsseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(,,,,,,, ptr, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16(,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg7_nxv4f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv4f16: +define @test_vlsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv4f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv4f16: +define @test_vlsseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16(,,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg8_nxv4f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv4f16: +define @test_vlsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv4f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv4f16: +define @test_vlsseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v 
v14, v7 -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv2f16(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2f16(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv2f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv2f16: +define @test_vlsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv2f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv2f16: +define @test_vlsseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f16( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv2f16(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f16(,,, ptr, i32, , i32, i32) -define @test_vlsseg3_nxv2f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv2f16: +define @test_vlsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: 
test_vlsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv2f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv2f16: +define @test_vlsseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f16( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2f16(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16(,,,, ptr, i32, , i32, i32) -define @test_vlsseg4_nxv2f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv2f16: +define @test_vlsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv2f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv2f16: +define @test_vlsseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: 
vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2f16(,,,,, ptr, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16(,,,,, ptr, i32, , i32, i32) -define @test_vlsseg5_nxv2f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv2f16: +define @test_vlsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv2f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv2f16: +define @test_vlsseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16( undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16( %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(,,,,,, ptr, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16(,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg6_nxv2f16(ptr %base, i32 %offset, 
i32 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv2f16: +define @test_vlsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv2f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv2f16: +define @test_vlsseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16( undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(,,,,,,, ptr, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16(,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg7_nxv2f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv2f16: +define @test_vlsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv2f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv2f16: +define 
@test_vlsseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(,,,,,,,, ptr, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16(,,,,,,,, ptr, i32, , i32, i32) -define @test_vlsseg8_nxv2f16(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv2f16: +define @test_vlsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv2f16(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv2f16: +define @test_vlsseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i32 
%offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv4f32(,, ptr, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4f32(,, ptr, i32, , i32, i32) -define @test_vlsseg2_nxv4f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv4f32: +define @test_vlsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv4f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv4f32: +define @test_vlsseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu -; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32( undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f32( %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv4f32(,,, ptr, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f32(,,, ptr, i32, , i32, i32) -define @test_vlsseg3_nxv4f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv4f32: +define @test_vlsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv4f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv4f32: +define @test_vlsseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu -; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32( undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f32( %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4f32(,,,, ptr, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32(,,,, ptr, i32, , i32, i32) -define @test_vlsseg4_nxv4f32(ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv4f32: +define @test_vlsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv4f32(ptr %base, i32 %offset, i32 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv4f32: +define @test_vlsseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu -; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32( undef, undef, undef, undef, ptr %base, i32 %offset, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} 
@llvm.riscv.vlsseg4.mask.nxv4f32( %1, %1, %1, %1, ptr %base, i32 %offset, %mask, i32 %vl, i32 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i32 %offset, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } + diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll index 2114369ea347d..6cc95979eb13e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll @@ -1,4700 +1,4247 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh \ +; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,} @llvm.riscv.vlsseg2.nxv16i16(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv16i16(,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) -define @test_vlsseg2_nxv16i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv16i16: +define @test_vlsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv16i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv16i16: +define @test_vlsseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu -; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i16( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv4i32(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4i32(,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) -define @test_vlsseg2_nxv4i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv4i32: +define @test_vlsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv4i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv4i32: +define @test_vlsseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu -; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i32( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv4i32(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i32(,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) -define @test_vlsseg3_nxv4i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv4i32: +define @test_vlsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vlsseg3e32.v 
v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv4i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv4i32: +define @test_vlsseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu -; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i32( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4i32(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32(,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) -define @test_vlsseg4_nxv4i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv4i32: +define @test_vlsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv4i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv4i32: +define @test_vlsseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlsseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu -; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv16i8(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv16i8(,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) -define @test_vlsseg2_nxv16i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv16i8: +define @test_vlsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vlsseg2_mask_nxv16i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv16i8: +define @test_vlsseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i8( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } 
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i64, i64)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i64, <vscale x 16 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i64, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i64, <vscale x 32 x i1>, i64, i64, i64)

-define <vscale x 16 x i8> @test_vlsseg3_nxv16i8(ptr %base, i64 %offset, i64 %vl) {
-; CHECK-LABEL: test_vlsseg3_nxv16i8:
+define <vscale x 32 x i8> @test_vlsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
-; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1
+; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
+; CHECK-NEXT:    vlsseg2e8.v v4, (a0), a1
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, i64 %offset, i64 %vl, i64 3)
+  %1 = call <vscale x 32 x i8> @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %0, i32 1)
+  ret <vscale x 32 x i8> %1
 }

-define <vscale x 16 x i8> @test_vlsseg3_mask_nxv16i8(ptr %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vlsseg3_mask_nxv16i8:
+define <vscale x 32 x i8> @test_vlsseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl, <vscale x 32 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
-; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1
-; CHECK-NEXT:    vmv2r.v v8, v6
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1, v0.t
+; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
+; CHECK-NEXT:    vlsseg2e8.v v4, (a0), a1, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.mask.nxv16i8(<vscale x 16 x i8> %1, <vscale x 16 x i8> %1, <vscale x 16 x i8> %1, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
+  %0 = tail call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, i64 %offset, <vscale x 32 x i1> %mask, i64 %vl, i64 1, i64 3)
+  %1 = call <vscale x 32 x i8> @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %0, i32 1)
+  ret <vscale x 32 x i8> %1
 }

-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i64, i64)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i64, <vscale x 16 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, i64, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, i64, <vscale x 1 x i1>, i64, i64, i64)

-define <vscale x 16 x i8> @test_vlsseg4_nxv16i8(ptr %base, i64 %offset, i64 %vl) {
-; CHECK-LABEL: test_vlsseg4_nxv16i8:
+define <vscale x 1 x i8> @test_vlsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
-; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) undef, ptr %base, i64 %offset, i64 %vl, i64 3)
+  %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %0, i32 1)
+  ret <vscale x 1 x i8> %1
 }

-define <vscale x 16 x i8> @test_vlsseg4_mask_nxv16i8(ptr %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vlsseg4_mask_nxv16i8:
+define <vscale x 1 x i8> @test_vlsseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
-; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1
-; CHECK-NEXT:    vmv2r.v v8, v6
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
-; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1, v0.t
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.mask.nxv16i8(<vscale x 16 x i8> %1, <vscale x 16 x i8> %1, <vscale x 16 x i8> %1, <vscale x 16 x i8> %1, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
+  %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) undef, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 3)
+  %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %0, i32 1)
+  ret <vscale x 1 x i8> %1
 }

-declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, i64)
-declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, <vscale x 1 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, i64, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, i64, <vscale x 2 x i1>, i64, i64, i64)

-define <vscale x 1 x i64> @test_vlsseg2_nxv1i64(ptr %base, i64 %offset, i64 %vl) {
-; CHECK-LABEL: test_vlsseg2_nxv1i64:
+define <vscale x 2 x i8> @test_vlsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
+; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
-  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
-  ret <vscale x 1 x i64> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) undef, ptr %base, i64 %offset, i64 %vl, i64 3)
+  %1 = call <vscale x 2 x i8> @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %0, i32 1)
+  ret <vscale x 2 x i8> %1
 }

-define <vscale x 1 x i64> @test_vlsseg2_mask_nxv1i64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vlsseg2_mask_nxv1i64:
+define <vscale x 2 x i8> @test_vlsseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t:
 ;
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i64( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i64( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv1i64(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i64(,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) -define @test_vlsseg3_nxv1i64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv1i64: +define @test_vlsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i64( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv1i64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv1i64: +define @test_vlsseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i64( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i64( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1i64(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i64(,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) -define @test_vlsseg4_nxv1i64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv1i64: +define @test_vlsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i64( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv1i64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv1i64: +define @test_vlsseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i64( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i64( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1i64(,,,,, ptr, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i64(,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) -define @test_vlsseg5_nxv1i64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv1i64: +define @test_vlsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: 
test_vlsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i64( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv1i64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv1i64: +define @test_vlsseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i64( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i64( %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i64(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i64(,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) -define @test_vlsseg6_nxv1i64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv1i64: +define @test_vlsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) 
%0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv1i64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv1i64: +define @test_vlsseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i64( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i64(,,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) -define @test_vlsseg7_nxv1i64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv1i64: +define @test_vlsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv1i64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv1i64: +define @test_vlsseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli 
zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i64(,,,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) -define @test_vlsseg8_nxv1i64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv1i64: +define @test_vlsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv1i64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv1i64: +define @test_vlsseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv1i32(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1i32(,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) -define @test_vlsseg2_nxv1i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv1i32: +define @test_vlsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv1i32: +define @test_vlsseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i32( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv1i32(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i32(,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) -define @test_vlsseg3_nxv1i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv1i32: +define @test_vlsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { 
+; CHECK-LABEL: test_vlsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv1i32: +define @test_vlsseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i32( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1i32(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32(,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) -define @test_vlsseg4_nxv1i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv1i32: +define @test_vlsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vlsseg4_mask_nxv1i32: +define @test_vlsseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1i32(,,,,, ptr, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32(,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) -define @test_vlsseg5_nxv1i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv1i32: +define @test_vlsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv1i32: +define @test_vlsseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} 
@llvm.riscv.vlsseg5.mask.nxv1i32( %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32(,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) -define @test_vlsseg6_nxv1i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv1i32: +define @test_vlsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv1i32: +define @test_vlsseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32(,,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) -define @test_vlsseg7_nxv1i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv1i32: +define @test_vlsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv1i32: +define @test_vlsseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32(,,,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) -define @test_vlsseg8_nxv1i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv1i32: +define @test_vlsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg8e32.v 
v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv1i32: +define @test_vlsseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv8i16(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8i16(,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) -define @test_vlsseg2_nxv8i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv8i16: +define @test_vlsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define 
@test_vlsseg2_mask_nxv8i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv8i16: +define @test_vlsseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu -; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i16( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv8i16(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i16(,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) -define @test_vlsseg3_nxv8i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv8i16: +define @test_vlsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv8i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv8i16: +define @test_vlsseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu -; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i16( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 
+ %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv8i16(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16(,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) -define @test_vlsseg4_nxv8i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv8i16: +define @test_vlsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv8i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv8i16: +define @test_vlsseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu -; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv4i8(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4i8(,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) -define @test_vlsseg2_nxv4i8(ptr %base, 
i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv4i8: +define @test_vlsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv4i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv4i8: +define @test_vlsseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i8( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv4i8(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i8(,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) -define @test_vlsseg3_nxv4i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv4i8: +define @test_vlsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv4i8(ptr 
%base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vlsseg3_mask_nxv4i8:
+define <vscale x 2 x i8> @test_vlsseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
-; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: vmv1r.v v8, v7
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
+; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
+; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
- %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.mask.nxv4i8(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
- %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
- ret <vscale x 4 x i8> %3
+ %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) undef, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1, i64 3)
+ %1 = call <vscale x 2 x i8> @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %0, i32 1)
+ ret <vscale x 2 x i8> %1
 }
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, i64)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, <vscale x 4 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, i64, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, i64, <vscale x 4 x i1>, i64, i64, i64)
-define <vscale x 4 x i8> @test_vlsseg4_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
-; CHECK-LABEL: test_vlsseg4_nxv4i8:
+define <vscale x 4 x i8> @test_vlsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
-; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
+; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) undef, ptr %base, i64 %offset, i64 %vl, i64 3)
+ %1 = call <vscale x 4 x i8> @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %0, i32 1)
 ret <vscale x 4 x i8> %1
 }
-define <vscale x 4 x i8> @test_vlsseg4_mask_nxv4i8(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vlsseg4_mask_nxv4i8:
+define <vscale x 4 x i8> @test_vlsseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
-; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: vmv1r.v v8, v7
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
+; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
+; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
- %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.mask.nxv4i8(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
- %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
- ret <vscale x 4 x i8> %3
+ %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) undef, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1, i64 3)
+ %1 = call <vscale x 4 x i8> @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %0, i32 1)
+ ret <vscale x 4 x i8> %1
 }
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, i64)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, <vscale x 4 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, i64, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, i64, <vscale x 8 x i1>, i64, i64, i64)
-define <vscale x 4 x i8> @test_vlsseg5_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
-; CHECK-LABEL: test_vlsseg5_nxv4i8:
+define <vscale x 8 x i8> @test_vlsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
-; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
+; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
+; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
- ret <vscale x 4 x i8> %1
+ %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) undef, ptr %base, i64 %offset, i64 %vl, i64 3)
+ %1 = call <vscale x 8 x i8> @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %0, i32 1)
+ ret <vscale x 8 x i8> %1
 }
-define <vscale x 4 x i8> @test_vlsseg5_mask_nxv4i8(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vlsseg5_mask_nxv4i8:
+define <vscale x 8 x i8> @test_vlsseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
-; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: vmv1r.v v8, v7
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
+; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
+; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
- %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.mask.nxv4i8(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
- %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
- ret <vscale x 4 x i8> %3
+ %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) undef, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1, i64 3)
+ %1 = call <vscale x 8 x i8> @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %0, i32 1)
+ ret <vscale x 8 x i8> %1
 }
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, i64)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, <vscale x 4 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, i64, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, i64, <vscale x 1 x i1>
, i64, i64, i64) -define @test_vlsseg6_nxv4i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv4i8: +define @test_vlsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv4i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv4i8: +define @test_vlsseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8(,,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) -define @test_vlsseg7_nxv4i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv4i8: +define @test_vlsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv4i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv4i8: +define @test_vlsseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8(,,,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) -define @test_vlsseg8_nxv4i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv4i8: +define @test_vlsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -define @test_vlsseg8_mask_nxv4i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv4i8: +define @test_vlsseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: 
vlsseg8e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) + +define @test_vlsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlsseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv1i16(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1i16(,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) -define @test_vlsseg2_nxv1i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv1i16: +define @test_vlsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, 
ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vlsseg2_mask_nxv1i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv1i16: +define @test_vlsseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) + +define @test_vlsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) + +define @test_vlsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, 
m1, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i16( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv1i16(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i16(,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) + +define @test_vlsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} -define @test_vlsseg3_nxv1i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv1i16: +define @test_vlsseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) + +define @test_vlsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = 
tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) + +define @test_vlsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vlsseg3_mask_nxv1i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv1i16: +define @test_vlsseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) + +define @test_vlsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlsseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, 
i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) + +define @test_vlsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlsseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i16( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) + +define @test_vlsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1i16(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16(,,,, ptr, i64, , i64, i64) +define @test_vlsseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} -define @test_vlsseg4_nxv1i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv1i16: 
+declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) + +define @test_vlsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) ret %1 } -define @test_vlsseg4_mask_nxv1i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv1i16: +define @test_vlsseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) + +define @test_vlsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vlsseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) + +define 
@test_vlsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vlsseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) + +define @test_vlsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vlsseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1i16(,,,,, ptr, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16(,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) -define @test_vlsseg5_nxv1i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv1i16: +define @test_vlsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 
%offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vlsseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vlsseg5_mask_nxv1i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv1i16: +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) + +define @test_vlsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vlsseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) + +define @test_vlsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; 
CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vlsseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16( %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16(,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) -define @test_vlsseg6_nxv1i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv1i16: +define @test_vlsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vlsseg6_mask_nxv1i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv1i16: +define @test_vlsseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , 
i64, i64, i64) + +define @test_vlsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vlsseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16(,,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) -define @test_vlsseg7_nxv1i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv1i16: +define @test_vlsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vlsseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) + +define @test_vlsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vlsseg7_mask_nxv1i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv1i16: +define @test_vlsseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) + +define @test_vlsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlsseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) 
@llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) + +define @test_vlsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlsseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) + +define @test_vlsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlsseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) + +define @test_vlsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: 
test_vlsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlsseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) + +define @test_vlsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlsseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) + +define @test_vlsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl, 
%mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) + +define @test_vlsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) + +define @test_vlsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) + +define @test_vlsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) + +define @test_vlsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlsseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16(,,,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) -define @test_vlsseg8_nxv1i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv1i16: +define @test_vlsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv1i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv1i16: +define @test_vlsseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv2i32(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2i32(,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) -define @test_vlsseg2_nxv2i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv2i32: +define @test_vlsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv2i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; 
CHECK-LABEL: test_vlsseg2_mask_nxv2i32: +define @test_vlsseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i32( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv2i32(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i32(,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) -define @test_vlsseg3_nxv2i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv2i32: +define @test_vlsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv2i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv2i32: +define @test_vlsseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i32( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, 
i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2i32(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32(,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) -define @test_vlsseg4_nxv2i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv2i32: +define @test_vlsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) ret %1 } -define @test_vlsseg4_mask_nxv2i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv2i32: +define @test_vlsseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2i32(,,,,, ptr, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32(,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) -define @test_vlsseg5_nxv2i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv2i32: +define @test_vlsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 
+ %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv2i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv2i32: +define @test_vlsseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32( %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32(,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) -define @test_vlsseg6_nxv2i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv2i32: +define @test_vlsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv2i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv2i32: +define @test_vlsseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, 
v7 -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32(,,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) -define @test_vlsseg7_nxv2i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv2i32: +define @test_vlsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vlsseg7_mask_nxv2i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv2i32: +define @test_vlsseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 
} -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32(,,,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) -define @test_vlsseg8_nxv2i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv2i32: +define @test_vlsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv2i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv2i32: +define @test_vlsseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv8i8(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8i8(,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) -define @test_vlsseg2_nxv8i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv8i8: +define @test_vlsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, 
ta, ma +; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv8i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv8i8: +define @test_vlsseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i8( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv8i8(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i8(,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) -define @test_vlsseg3_nxv8i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv8i8: +define @test_vlsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv8i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv8i8: +define @test_vlsseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, 
mf2, ta, ma +; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i8( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv8i8(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8(,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) -define @test_vlsseg4_nxv8i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv8i8: +define @test_vlsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv8i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv8i8: +define @test_vlsseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv8i8(,,,,, ptr, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8(,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) -define @test_vlsseg5_nxv8i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv8i8: +define @test_vlsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv8i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv8i8: +define @test_vlsseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8( %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8(,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) -define @test_vlsseg6_nxv8i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv8i8: +define @test_vlsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv8i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv8i8: +define @test_vlsseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8(,,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) -define @test_vlsseg7_nxv8i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv8i8: +define @test_vlsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv8i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv8i8: +define @test_vlsseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: 
vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8(,,,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) -define @test_vlsseg8_nxv8i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv8i8: +define @test_vlsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv8i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv8i8: +define @test_vlsseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, 
i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv4i64(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4i64(,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) -define @test_vlsseg2_nxv4i64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv4i64: +define @test_vlsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i64( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vlsseg2_mask_nxv4i64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv4i64: +define @test_vlsseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i64( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i64( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv4i16(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4i16(,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) -define @test_vlsseg2_nxv4i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv4i16: +define @test_vlsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, 
ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv4i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv4i16: +define @test_vlsseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i16( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv4i16(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i16(,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) -define @test_vlsseg3_nxv4i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv4i16: +define @test_vlsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv4i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv4i16: +define @test_vlsseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i16( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 
%vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4i16(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16(,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) -define @test_vlsseg4_nxv4i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv4i16: +define @test_vlsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv4i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv4i16: +define @test_vlsseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv4i16(,,,,, ptr, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16(,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) -define @test_vlsseg5_nxv4i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv4i16: +define @test_vlsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: 
test_vlsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv4i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv4i16: +define @test_vlsseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16( %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16(,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) -define @test_vlsseg6_nxv4i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv4i16: +define @test_vlsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv4i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv4i16: 
+define @test_vlsseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16(,,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) -define @test_vlsseg7_nxv4i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv4i16: +define @test_vlsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv4i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv4i16: +define @test_vlsseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = 
tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16(,,,,,,,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) -define @test_vlsseg8_nxv4i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv4i16: +define @test_vlsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv4i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv4i16: +define @test_vlsseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv1i8(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1i8(,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) -define @test_vlsseg2_nxv1i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv1i8: +define @test_vlsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv1i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv1i8: +define @test_vlsseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i8( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv1i8(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i8(,,, ptr, i64, , i64, i64) -define @test_vlsseg3_nxv1i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv1i8: +define @test_vlsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv1i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv1i8: +define 
@test_vlsseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i8( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1i8(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8(,,,, ptr, i64, , i64, i64) -define @test_vlsseg4_nxv1i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv1i8: +define @test_vlsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv1i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv1i8: +define @test_vlsseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + 
ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1i8(,,,,, ptr, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8(,,,,, ptr, i64, , i64, i64) -define @test_vlsseg5_nxv1i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv1i8: +define @test_vlsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv1i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv1i8: +define @test_vlsseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8( %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8(,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg6_nxv1i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv1i8: +define @test_vlsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv1i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv1i8: +define @test_vlsseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8(,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg7_nxv1i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv1i8: +define @test_vlsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv1i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv1i8: +define @test_vlsseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr 
%base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8(,,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg8_nxv1i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv1i8: +define @test_vlsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv1i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv1i8: +define @test_vlsseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv2i8(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2i8(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv2i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv2i8: +define @test_vlsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 
%offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv2i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv2i8: +define @test_vlsseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i8( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv2i8(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i8(,,, ptr, i64, , i64, i64) -define @test_vlsseg3_nxv2i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv2i8: +define @test_vlsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv2i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv2i8: +define @test_vlsseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu -; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 
-; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i8( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2i8(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8(,,,, ptr, i64, , i64, i64) -define @test_vlsseg4_nxv2i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv2i8: +define @test_vlsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv2i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv2i8: +define @test_vlsseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2i8(,,,,, ptr, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8(,,,,, ptr, i64, , i64, i64) -define @test_vlsseg5_nxv2i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv2i8: +define @test_vlsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl) { 
+; CHECK-LABEL: test_vlsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv2i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv2i8: +define @test_vlsseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8( %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8(,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg6_nxv2i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv2i8: +define @test_vlsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv2i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv2i8: +define @test_vlsseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlsseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8(,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg7_nxv2i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv2i8: +define @test_vlsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv2i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv2i8: +define @test_vlsseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8(,,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg8_nxv2i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv2i8: +define @test_vlsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv2i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv2i8: +define @test_vlsseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv8i32(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8i32(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv8i32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv8i32: +define @test_vlsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), 
a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv8i32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv8i32: +define @test_vlsseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu -; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i32( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv32i8(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv32i8(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv32i8(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv32i8: +define @test_vlsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma -; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv32i8(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv32i8: +define @test_vlsseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu -; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} 
@llvm.riscv.vlsseg2.mask.nxv32i8( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv2i16(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2i16(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv2i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv2i16: +define @test_vlsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv2i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv2i16: +define @test_vlsseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i16( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv2i16(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i16(,,, ptr, i64, , i64, i64) -define @test_vlsseg3_nxv2i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv2i16: +define @test_vlsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret 
%1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv2i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv2i16: +define @test_vlsseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i16( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2i16(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16(,,,, ptr, i64, , i64, i64) -define @test_vlsseg4_nxv2i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv2i16: +define @test_vlsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv2i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv2i16: +define @test_vlsseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16( %1, %1, %1, %1, ptr 
%base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2i16(,,,,, ptr, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16(,,,,, ptr, i64, , i64, i64) -define @test_vlsseg5_nxv2i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv2i16: +define @test_vlsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv2i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv2i16: +define @test_vlsseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16( %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16(,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg6_nxv2i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv2i16: +define @test_vlsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv2i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv2i16: +define @test_vlsseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16(,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg7_nxv2i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv2i16: +define @test_vlsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv2i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv2i16: +define @test_vlsseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v 
v13, v7 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16(,,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg8_nxv2i16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv2i16: +define @test_vlsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv2i16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv2i16: +define @test_vlsseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv2i64(,, ptr, i64, i64) 
-declare {,} @llvm.riscv.vlsseg2.mask.nxv2i64(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv2i64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv2i64: +define @test_vlsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i64( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv2i64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv2i64: +define @test_vlsseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i64( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i64( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv2i64(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i64(,,, ptr, i64, , i64, i64) -define @test_vlsseg3_nxv2i64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv2i64: +define @test_vlsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i64( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv2i64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv2i64: +define @test_vlsseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, 
i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i64( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i64( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2i64(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i64(,,,, ptr, i64, , i64, i64) -define @test_vlsseg4_nxv2i64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv2i64: +define @test_vlsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i64( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv2i64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv2i64: +define @test_vlsseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i64( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i64( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv16f16(,, ptr, i64, 
i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv16f16(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv16f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv16f16: +define @test_vlsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv16f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv16f16: +define @test_vlsseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu -; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16f16( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv4f64(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4f64(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv4f64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv4f64: +define @test_vlsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv4f64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv4f64: +define @test_vlsseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, 
i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f64( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv1f64(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1f64(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv1f64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv1f64: +define @test_vlsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv1f64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv1f64: +define @test_vlsseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f64( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv1f64(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f64(,,, ptr, i64, , i64, i64) -define @test_vlsseg3_nxv1f64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: 
test_vlsseg3_nxv1f64: +define @test_vlsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv1f64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv1f64: +define @test_vlsseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f64( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1f64(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64(,,,, ptr, i64, , i64, i64) -define @test_vlsseg4_nxv1f64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv1f64: +define @test_vlsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv1f64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv1f64: +define @test_vlsseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlsseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1f64(,,,,, ptr, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64(,,,,, ptr, i64, , i64, i64) -define @test_vlsseg5_nxv1f64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv1f64: +define @test_vlsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv1f64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv1f64: +define @test_vlsseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64( %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) 
+ ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64(,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg6_nxv1f64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv1f64: +define @test_vlsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv1f64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv1f64: +define @test_vlsseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64(,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg7_nxv1f64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv1f64: +define @test_vlsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 
%offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv1f64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv1f64: +define @test_vlsseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64(,,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg8_nxv1f64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv1f64: +define @test_vlsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv1f64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv1f64: +define @test_vlsseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv2f32(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2f32(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv2f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv2f32: +define @test_vlsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv2f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv2f32: +define @test_vlsseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f32( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv2f32(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f32(,,, ptr, i64, , i64, i64) -define @test_vlsseg3_nxv2f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv2f32: +define @test_vlsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg3e32.v 
v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv2f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv2f32: +define @test_vlsseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f32( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2f32(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32(,,,, ptr, i64, , i64, i64) -define @test_vlsseg4_nxv2f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv2f32: +define @test_vlsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vlsseg4_mask_nxv2f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv2f32: +define @test_vlsseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, 
v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2f32(,,,,, ptr, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32(,,,,, ptr, i64, , i64, i64) -define @test_vlsseg5_nxv2f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv2f32: +define @test_vlsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv2f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv2f32: +define @test_vlsseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32( %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32(,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg6_nxv2f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv2f32: +define @test_vlsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: 
test_vlsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vlsseg6_mask_nxv2f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv2f32: +define @test_vlsseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32(,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg7_nxv2f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv2f32: +define @test_vlsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv2f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv2f32: +define @test_vlsseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 -; 
CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32(,,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg8_nxv2f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv2f32: +define @test_vlsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 +; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vlsseg8_mask_nxv2f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv2f32: +define @test_vlsseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv1f16(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1f16(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv1f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv1f16: +define @test_vlsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv1f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv1f16: +define @test_vlsseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f16( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv1f16(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f16(,,, ptr, i64, , i64, i64) -define @test_vlsseg3_nxv1f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv1f16: +define @test_vlsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define 
@test_vlsseg3_mask_nxv1f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv1f16: +define @test_vlsseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f16( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1f16(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16(,,,, ptr, i64, , i64, i64) -define @test_vlsseg4_nxv1f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv1f16: +define @test_vlsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv1f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv1f16: +define @test_vlsseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 
1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1f16(,,,,, ptr, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16(,,,,, ptr, i64, , i64, i64) -define @test_vlsseg5_nxv1f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv1f16: +define @test_vlsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv1f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv1f16: +define @test_vlsseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16( %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16(,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg6_nxv1f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv1f16: +define @test_vlsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv1f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv1f16: +define @test_vlsseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16(,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg7_nxv1f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv1f16: +define @test_vlsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv1f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv1f16: +define @test_vlsseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg3e64.v 
v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16(,,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg8_nxv1f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv1f16: +define @test_vlsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv1f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv1f16: +define @test_vlsseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv1f32(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1f32(,, ptr, i64, , i64, i64) -define 
@test_vlsseg2_nxv1f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv1f32: +define @test_vlsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv1f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv1f32: +define @test_vlsseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f32( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv1f32(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f32(,,, ptr, i64, , i64, i64) -define @test_vlsseg3_nxv1f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv1f32: +define @test_vlsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv1f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv1f32: +define @test_vlsseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlsseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f32( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1f32(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32(,,,, ptr, i64, , i64, i64) -define @test_vlsseg4_nxv1f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv1f32: +define @test_vlsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv1f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv1f32: +define @test_vlsseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1f32(,,,,, ptr, i64, i64) -declare {,,,,} 
@llvm.riscv.vlsseg5.mask.nxv1f32(,,,,, ptr, i64, , i64, i64) -define @test_vlsseg5_nxv1f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv1f32: +define @test_vlsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv1f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv1f32: +define @test_vlsseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32( %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32(,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg6_nxv1f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv1f32: +define @test_vlsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define 
@test_vlsseg6_mask_nxv1f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv1f32: +define @test_vlsseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32(,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg7_nxv1f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv1f32: +define @test_vlsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv1f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv1f32: +define @test_vlsseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32( 
%1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32(,,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg8_nxv1f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv1f32: +define @test_vlsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv1f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv1f32: +define @test_vlsseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv8f16(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8f16(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv8f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv8f16: +define @test_vlsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv8f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv8f16: +define @test_vlsseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu -; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f16( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv8f16(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv8f16(,,, ptr, i64, , i64, i64) -define @test_vlsseg3_nxv8f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv8f16: +define @test_vlsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv8f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv8f16: +define @test_vlsseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu -; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, 
e16, m1, ta, ma +; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8f16( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv8f16(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16(,,,, ptr, i64, , i64, i64) -define @test_vlsseg4_nxv8f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv8f16: +define @test_vlsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 +; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv8f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv8f16: +define @test_vlsseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu -; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv8f32(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8f32(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv8f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv8f32: +define @test_vlsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv8f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv8f32: +define @test_vlsseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu -; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f32( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv2f64(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2f64(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv2f64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv2f64: +define @test_vlsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv2f64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv2f64: +define @test_vlsseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), 
a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f64( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv2f64(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f64(,,, ptr, i64, , i64, i64) -define @test_vlsseg3_nxv2f64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv2f64: +define @test_vlsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv2f64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv2f64: +define @test_vlsseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f64( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2f64(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64(,,,, ptr, i64, , i64, i64) -define @test_vlsseg4_nxv2f64(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv2f64: +define @test_vlsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 +; 
CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv2f64(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv2f64: +define @test_vlsseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv4f16(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4f16(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv4f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv4f16: +define @test_vlsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv4f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv4f16: +define @test_vlsseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: 
vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f16( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv4f16(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f16(,,, ptr, i64, , i64, i64) -define @test_vlsseg3_nxv4f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv4f16: +define @test_vlsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv4f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv4f16: +define @test_vlsseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f16( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4f16(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16(,,,, ptr, i64, , i64, i64) -define @test_vlsseg4_nxv4f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv4f16: +define @test_vlsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: 
vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv4f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv4f16: +define @test_vlsseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv4f16(,,,,, ptr, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16(,,,,, ptr, i64, , i64, i64) -define @test_vlsseg5_nxv4f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv4f16: +define @test_vlsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv4f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv4f16: +define @test_vlsseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 
+; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16( %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16(,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg6_nxv4f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv4f16: +define @test_vlsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv4f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv4f16: +define @test_vlsseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16(,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg7_nxv4f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv4f16: +define 
@test_vlsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv4f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv4f16: +define @test_vlsseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16(,,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg8_nxv4f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv4f16: +define @test_vlsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv4f16(ptr 
%base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv4f16: +define @test_vlsseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv2f16(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2f16(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv2f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv2f16: +define @test_vlsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv2f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv2f16: +define @test_vlsseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f16( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv2f16(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f16(,,, ptr, i64, , i64, i64) -define @test_vlsseg3_nxv2f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg3_nxv2f16: +define @test_vlsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv2f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv2f16: +define @test_vlsseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f16( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2f16(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16(,,,, ptr, i64, , i64, i64) -define @test_vlsseg4_nxv2f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv2f16: +define @test_vlsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, 
ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv2f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv2f16: +define @test_vlsseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2f16(,,,,, ptr, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16(,,,,, ptr, i64, , i64, i64) -define @test_vlsseg5_nxv2f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg5_nxv2f16: +define @test_vlsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vlsseg5_mask_nxv2f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg5_mask_nxv2f16: +define @test_vlsseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16( undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16( %1, %1, %1, 
%1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(,,,,,, ptr, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16(,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg6_nxv2f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg6_nxv2f16: +define @test_vlsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg6_mask_nxv2f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg6_mask_nxv2f16: +define @test_vlsseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16( undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(,,,,,,, ptr, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16(,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg7_nxv2f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg7_nxv2f16: +define @test_vlsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - 
%0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg7_mask_nxv2f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg7_mask_nxv2f16: +define @test_vlsseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(,,,,,,,, ptr, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16(,,,,,,,, ptr, i64, , i64, i64) -define @test_vlsseg8_nxv2f16(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg8_nxv2f16: +define @test_vlsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vlsseg8_mask_nxv2f16(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg8_mask_nxv2f16: +define @test_vlsseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 
-; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vlsseg2.nxv4f32(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4f32(,, ptr, i64, , i64, i64) -define @test_vlsseg2_nxv4f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg2_nxv4f32: +define @test_vlsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg2_mask_nxv4f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg2_mask_nxv4f32: +define @test_vlsseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu -; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32( undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f32( %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vlsseg3.nxv4f32(,,, ptr, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f32(,,, ptr, i64, , i64, i64) -define @test_vlsseg3_nxv4f32(ptr %base, i64 %offset, i64 %vl) { -; 
CHECK-LABEL: test_vlsseg3_nxv4f32: +define @test_vlsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg3_mask_nxv4f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg3_mask_nxv4f32: +define @test_vlsseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vlsseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu -; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32( undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f32( %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4f32(,,,, ptr, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32(,,,, ptr, i64, , i64, i64) -define @test_vlsseg4_nxv4f32(ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vlsseg4_nxv4f32: +define @test_vlsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vlsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vlsseg4_mask_nxv4f32(ptr %base, i64 %offset, i64 %vl, %mask) { -; CHECK-LABEL: test_vlsseg4_mask_nxv4f32: +define @test_vlsseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlsseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu -; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32( undef, undef, undef, undef, ptr %base, i64 %offset, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32( %1, %1, %1, %1, ptr %base, i64 %offset, %mask, i64 %vl, i64 1) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } + diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll index 3d2503a8558a2..10bfdec0e2c99 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll @@ -1,12722 +1,13856 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh \ +; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv16i16_nxv16i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i16: +define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16i16_nxv16i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i16: +define @test_vluxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, 
a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv16i16_nxv16i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i8: +define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16i16_nxv16i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i8: +define @test_vluxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(,, ptr, , , i32, i32) +declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv16i16_nxv16i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i32: +define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16i16_nxv16i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i32: +define @test_vluxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv1i8_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i8: +define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8( undef, 
undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i8: +define @test_vluxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv1i8_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i32: +define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i32: +define @test_vluxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: 
vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv1i8_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i16: +define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i16: +define @test_vluxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv1i8_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i8: +define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i8: +define @test_vluxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv1i8_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i32: +define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i32: +define @test_vluxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv1i8_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i16: +define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i16: +define @test_vluxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: 
vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv1i8_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i8: +define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i8: +define @test_vluxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(,,,, ptr, , i32) -declare {,,,} 
@llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv1i8_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i32: +define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i32: +define @test_vluxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv1i8_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i16: +define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: 
vluxseg4ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i16: +define @test_vluxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv1i8_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i8: +define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i8_nxv1i8( 
%val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i8: +define @test_vluxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv1i8_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i32: +define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i32: +define @test_vluxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: 
vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv1i8_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i16: +define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i16: +define @test_vluxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} 
@llvm.riscv.vluxseg6.nxv1i8.nxv1i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv1i8_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i8: +define @test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i8: +define @test_vluxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1.nxv32i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv1i8_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i32: +define 
@test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i32: +define @test_vluxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1.nxv32i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv1i8_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i16: +define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vluxseg6_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i16: +define @test_vluxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv1i8_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i8: +define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vluxseg7_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i8: +define @test_vluxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, 
v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv1i8_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i32: +define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vluxseg7_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i32: +define @test_vluxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv1i8_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i16: +define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i16: +define @test_vluxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv1i8_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i8: +define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i8: +define @test_vluxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv1i8_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i32: +define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), 
v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i32: +define @test_vluxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv1i8_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i16: +define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i16: +define @test_vluxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv16i8_nxv16i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i16: +define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i16: +define @test_vluxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v12, 
v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv16i8_nxv16i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i8: +define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i8: +define @test_vluxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv16i8_nxv16i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i32: +define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i32: +define @test_vluxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv16i8_nxv16i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i16: +define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} 
@llvm.riscv.vluxseg3.nxv16i8.nxv16i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i16: +define @test_vluxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv16i8_nxv16i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i8: +define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i8: +define @test_vluxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; 
CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv16i8_nxv16i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i32: +define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vluxseg3_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i32: +define @test_vluxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(,,,, ptr, , i32) 
-declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv16i8_nxv16i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i16: +define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v8 +; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vluxseg4_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i16: +define @test_vluxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv16i8_nxv16i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i8: +define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 -; 
CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vluxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vluxseg4_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i8: +define @test_vluxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv16i8_nxv16i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i32: +define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vluxseg4_mask_nxv16i8_nxv16i32: +define @test_vluxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv2i32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i32: +define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i32: +define @test_vluxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, 
ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv2i32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i8: +define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i8: +define @test_vluxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv2i32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i16: +define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i16: +define @test_vluxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv2i32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i32: +define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i32: +define 
@test_vluxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv2i32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i8: +define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i8: +define @test_vluxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv2i32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i16: +define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i16: +define @test_vluxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv2i32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vluxseg4_nxv2i32_nxv2i32: +define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i32: +define @test_vluxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv2i32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i8: +define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i8: +define @test_vluxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv2i32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i16: +define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i16: +define @test_vluxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; 
CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv2i32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i32: +define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i32: +define @test_vluxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv2i32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i8: +define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i8: +define @test_vluxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv2i32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vluxseg5_nxv2i32_nxv2i16: +define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i16: +define @test_vluxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv2i32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i32: +define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = 
extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i32: +define @test_vluxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv2i32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i8: +define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i8: +define @test_vluxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i32 %vl, %mask) { 
+; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv2i32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i16: +define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i16: +define @test_vluxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, 
%val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv2i32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i32: +define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vluxseg7_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(,,,,,,, ptr, , , i32, i32) - -define @test_vluxseg7_nxv2i32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i8: +define @test_vluxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8( undef, undef, 
undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vluxseg7_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv2i32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i16: +define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i16: +define @test_vluxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: 
vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv2i32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i32: +define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i32: +define @test_vluxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, 
i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv2i32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i8: +define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i8: +define @test_vluxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv2i32_nxv2i16(ptr %base, 
%index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i16: +define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i16: +define @test_vluxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv4i16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i16: +define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16( undef, undef, ptr 
%base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i16: +define @test_vluxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv4i16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i8: +define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i8: +define @test_vluxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v 
v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv4i16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i32: +define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i32: +define @test_vluxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv4i16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i16: +define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i16: +define @test_vluxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv4i16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i8: +define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 3)
+  %1 = call <vscale x 8 x i8> @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %0, i32 1)
+  ret <vscale x 8 x i8> %1
 }
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i8:
+define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val, <vscale x 4 x i16> %val, <vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) undef, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1, i32 3)
+  %1 = call <vscale x 8 x i8> @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %0, i32 1)
+  ret <vscale x 8 x i8> %1
 }
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i32>, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32, i32)
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i32:
+define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vluxseg5ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 3)
+  %1 = call <vscale x 8 x i8> @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %0, i32 1)
+  ret <vscale x 8 x i8> %1
 }
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i32:
+define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v7,
(a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv4i16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i16: +define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i16: +define @test_vluxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(,,,, ptr, , i32) -declare {,,,} 
@llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv4i16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i8: +define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i8: +define @test_vluxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv4i16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i32: +define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: 
vluxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i32: +define @test_vluxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv4i16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i16: +define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vluxseg5_mask_nxv4i16_nxv4i16: +define @test_vluxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv4i16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i8: +define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i8: +define @test_vluxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} 
@llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv4i16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i32: +define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i32: +define @test_vluxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(,,,,,, ptr, , , i32, i32) +declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv4i16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i16: +define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i16: +define @test_vluxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv4i16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i8: +define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, 
m1, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i8: +define @test_vluxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv4i16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i32: +define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define 
@test_vluxseg6_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i32: +define @test_vluxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv4i16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i16: +define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i16: +define @test_vluxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, 
(a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv4i16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i8: +define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i8: +define @test_vluxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv4i16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i32: +define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i32: +define @test_vluxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, 
i32) -define @test_vluxseg8_nxv4i16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i16: +define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i16: +define @test_vluxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv4i16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i8: +define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 
= tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i8: +define @test_vluxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv4i16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i32: +define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define 
@test_vluxseg8_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i32: +define @test_vluxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv1i32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i8: +define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i8: +define @test_vluxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv1i32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i32: +define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i32: +define @test_vluxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, 
, , i32, i32, i32) -define @test_vluxseg2_nxv1i32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i16: +define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i16: +define @test_vluxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv1i32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i8: +define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i8: +define @test_vluxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv1i32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i32: +define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i32: +define @test_vluxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} 
@llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv1i32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i16: +define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i16: +define @test_vluxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv1i32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i8: +define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i8: +define @test_vluxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv1i32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i32: +define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vluxseg4.nxv1i32.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i32: +define @test_vluxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv1i32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i16: +define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i16: +define @test_vluxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv1i32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i8: +define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i8: +define @test_vluxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv1i32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i32: +define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i32: +define @test_vluxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv1i32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i16: +define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i16: +define @test_vluxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv1i32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i8: +define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vluxseg6.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i8: +define @test_vluxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv1i32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i32: +define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i32: +define 
@test_vluxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv1i32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i16: +define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i16: +define @test_vluxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv1i32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i8: +define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i8: +define @test_vluxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(,,,,,,, ptr, , 
i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv1i32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i32: +define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i32: +define @test_vluxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv1i32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i16: +define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(ptr 
%base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i16: +define @test_vluxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv1i32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i8: +define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i8: +define @test_vluxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv1i32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i32: +define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i32: +define @test_vluxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; 
CHECK-LABEL: test_vluxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv1i32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i16: +define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, 
, , i32, i32, i32) + +define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i16: +define @test_vluxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv8i16_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i16: +define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i16: +define @test_vluxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv8i16_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i8: +define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i8: +define @test_vluxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv8i16_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i32: +define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i32: +define @test_vluxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv8i16_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i16: +define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i16: +define @test_vluxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv8i16_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i8: +define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8( undef, undef, undef, ptr 
%base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i8: +define @test_vluxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv8i16_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i32: +define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i32: +define @test_vluxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define 
@test_vluxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv8i16_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i16: +define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vluxseg4_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i16: +define @test_vluxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(,,,, ptr, , i32) -declare {,,,} 
@llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv8i16_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i8: +define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vluxseg4_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i8: +define @test_vluxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv8i16_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i32: +define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: 
vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vluxseg4_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i32: +define @test_vluxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv8i8_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i16: +define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i16: +define @test_vluxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, 
i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv8i8_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i8: +define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i8: +define @test_vluxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv8i8_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i32: +define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i32: +define @test_vluxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv8i8_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i16: +define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i16: +define @test_vluxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv8i8_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i8: +define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, 
i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i8: +define @test_vluxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv8i8_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i32: +define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i32: +declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv8i8_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i16: +define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) 
+ ret %1 } -define @test_vluxseg4_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i16: +define @test_vluxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, 
a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; 
CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, 
ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, 
+
+define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vluxseg4ei8.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) undef, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1, i32 4)
+  %1 = call <vscale x 4 x i16> @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %0, i32 1)
+  ret <vscale x 4 x i16> %1
+}
+
+declare target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i16>, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32, i32)
+
+define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
+  %1 = call <vscale x 4 x i16> @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %0, i32 1)
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vluxseg4ei16.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) undef, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1, i32 4)
+  %1 = call <vscale x 4 x i16> @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %0, i32 1)
+  ret <vscale x 4 x i16> %1
+}
+
+declare target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i32>, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32, i32)
+
+define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
+  %1 = call <vscale x 4 x i16> @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %0, i32 1)
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) undef, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1, i32 4)
+  %1 = call <vscale x 4 x i16> @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %0, i32 1)
+  ret <vscale x 4 x i16> %1
+}
+
+declare target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i8>, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32, i32)
+
+define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 4)
+  %1 = call <vscale x 8 x i16> @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %0, i32 1)
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) undef, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1, i32 4)
+  %1 = call <vscale x 8 x i16> @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %0, i32 1)
+  ret <vscale x 8 x i16> %1
+}
+
+declare target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i16>, i32, i32)
+declare target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32, i32)
+
+define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 4)
+  %1 = call <vscale x 8 x i16> @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %0, i32 1)
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL:
test_vluxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), 
v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) 
undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 
6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , 
i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 
7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr 
%base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: 
vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) undef, 
ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, 
i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr 
%base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) + +define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; 
CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret 
+entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", 
, 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) + +define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", 
, 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, 
i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) + +define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 
5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr 
%base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) + +define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: 
vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) + +define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, 
ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) + +define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, 
i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) + +define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv8i8_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i8: +define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8( 
undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i8: +define @test_vluxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv8i8_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i32: +define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i32: +define @test_vluxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv8i8_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i16: +define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i16: +define @test_vluxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv8i8_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i8: +define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i8: +define @test_vluxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv8i8_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i32: +define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i32: +define @test_vluxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv8i8_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i16: +define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vluxseg6.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i16: +define @test_vluxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv8i8_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i8: +define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i8: +define 
@test_vluxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv8i8_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i32: +define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i32: +define @test_vluxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv8i8_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i16: +define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i16: +define @test_vluxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare 
{,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv8i8_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i8: +define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i8: +define @test_vluxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv8i8_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vluxseg7_nxv8i8_nxv8i32: +define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i32: +define @test_vluxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv8i8_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i16: +define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16( 
undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i16: +define @test_vluxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv8i8_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i8: +define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv8i8_nxv8i8( %val, ptr %base, 
%index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i8: +define @test_vluxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv8i8_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i32: +define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i32: +define @test_vluxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: 
vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv8i32_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i16: +define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i32_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i16: +define @test_vluxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} 
@llvm.riscv.vluxseg2.nxv8i32.nxv8i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv8i32_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i8: +define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i32_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i8: +define @test_vluxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv8i32_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i32: +define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; 
CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i32_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i32: +define @test_vluxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv4i8_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i16: +define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i16: +define @test_vluxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv4i8_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i8: +define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i8: +define @test_vluxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} 
@llvm.riscv.vluxseg2.nxv4i8.nxv4i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg2_nxv4i8_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i32: +define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i32: +define @test_vluxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv4i8_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i16: +define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, 
ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i16: +define @test_vluxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv4i8_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i8: +define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i8: +define @test_vluxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg3_nxv4i8_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i32: +define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i32: +define @test_vluxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv4i8_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i16: +define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i16: +define @test_vluxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv4i8_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i8: +define 
@test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i8: +define @test_vluxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vluxseg4_nxv4i8_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i32: +define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i32: +define @test_vluxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv4i8_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i16: +define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i16: +define @test_vluxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; 
CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv4i8_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i8: +define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i8: +define @test_vluxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32(,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg5_nxv4i8_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i32: +define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i32: +define @test_vluxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv4i8_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vluxseg6_nxv4i8_nxv4i16: +define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i16: +define @test_vluxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv4i8_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i8: +define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i8: +define @test_vluxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32(,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vluxseg6_nxv4i8_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i32: +define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i32: +define @test_vluxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv4i8_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i16: +define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i16: +define @test_vluxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail 
call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv4i8_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i8: +define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i8: +define @test_vluxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32(,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg7_nxv4i8_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i32: +define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i32: +define @test_vluxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv4i8_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i16: +define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i16: +define @test_vluxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, ptr, , , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) -define @test_vluxseg8_nxv4i8_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i8: +define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, 
ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i8: +define @test_vluxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv4i8_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i32: +define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i32: +define @test_vluxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v12, 
(a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv1i16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i8: +define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i8: +define @test_vluxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv1i16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i32: +define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i32: +define @test_vluxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv1i16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i16: +define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i16: +define @test_vluxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16( %val, %val, ptr %base, %index, %mask, 
i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv1i16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i8: +define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i8: +define @test_vluxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv1i16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i32: +define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32( undef, 
undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i32: +define @test_vluxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv1i16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i16: +define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i16: +define @test_vluxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16( 
%val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv1i16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i8: +define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i8: +define @test_vluxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv1i16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i32: +define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; 
CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i32: +define @test_vluxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv1i16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i16: +define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i16: +define @test_vluxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli 
zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv1i16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i8: +define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i8: +define @test_vluxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv1i16_nxv1i32(ptr %base, %index, i32 
%vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i32: +define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i32: +define @test_vluxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv1i16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i16: +define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i16: +define @test_vluxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv1i16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i8: +define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i8: +define @test_vluxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv1i16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i32: +define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i32: +define @test_vluxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv1i16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i16: +define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i16: +define @test_vluxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv1i16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i8: +define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i8: +define @test_vluxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; 
CHECK-LABEL: test_vluxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv1i16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i32: +define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i32: +define @test_vluxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr 
%base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv1i16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i16: +define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i16: +define @test_vluxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv1i16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i8: +define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vluxseg8.nxv1i16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i8: +define @test_vluxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv1i16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i32: +define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i32: +define @test_vluxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, 
v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv1i16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i16: +define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i16: +define @test_vluxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr 
%base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv32i8_nxv32i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i16: +define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv32i8_nxv32i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv32i16: +define @test_vluxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv32i8_nxv32i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i8: +define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, 
%index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv32i8_nxv32i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv32i8: +define @test_vluxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2i8_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i32: +define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i32: +define @test_vluxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2i8_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i8: +define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i8: +define @test_vluxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2i8_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i16: +define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i16: +define @test_vluxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2i8_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i32: +define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i32: +define @test_vluxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2i8_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i8: +define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i8: +define @test_vluxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2i8_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i16: +define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i16: +define @test_vluxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2i8_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i32: +define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i32: +define @test_vluxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2i8_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i8: +define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i8: +define @test_vluxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2i8_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i16: +define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16( undef, undef, undef, undef, ptr %base, 
%index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i16: +define @test_vluxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv2i8_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i32: +define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i32: +define @test_vluxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: 
vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv2i8_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i8: +define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i8: +define @test_vluxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv2i8_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i16: +define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i16: +define @test_vluxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv2i8_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i32: +define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i32: +define 
@test_vluxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv2i8_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i8: +define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i8: +define @test_vluxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv2i8_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i16: +define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i16: +define @test_vluxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv2i8_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i32: +define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, 
(a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i32: +define @test_vluxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv2i8_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i8: +define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i8: +define @test_vluxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv2i8_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i16: +define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i16: +define @test_vluxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv2i8_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i32: +define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i32: +define @test_vluxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv2i8_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i8: +define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vluxseg8.nxv2i8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i8: +define @test_vluxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv2i8_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i16: +define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i16: +define @test_vluxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2i16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i32: +define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i32: +define @test_vluxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2i16_nxv2i8(ptr %base, %index, i32 %vl) { -; 
CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i8: +define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i8: +define @test_vluxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2i16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i16: +define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i16: +define @test_vluxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2i16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i32: +define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i32: +define @test_vluxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2i16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i8: +define 
@test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i8: +define @test_vluxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2i16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i16: +define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i16: +define @test_vluxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2i16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i32: +define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i32: +define @test_vluxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2i16_nxv2i8(ptr 
%base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i8: +define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i8: +define @test_vluxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2i16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i16: +define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i16: +define 
@test_vluxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv2i16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i32: +define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i32: +define @test_vluxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv2i16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i8: +define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i8: +define @test_vluxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv2i16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i16: +define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = 
tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i16: +define @test_vluxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv2i16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i32: +define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i32: +define @test_vluxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: 
vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv2i16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i8: +define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i8: +define @test_vluxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv2i16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i16: +define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 
%vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i16: +define @test_vluxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv2i16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i32: +define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i32: +define 
@test_vluxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv2i16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i8: +define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i8: +define @test_vluxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv2i16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i16: +define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i16: +define @test_vluxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv2i16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i32: +define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: 
vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i32: +define @test_vluxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv2i16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i8: +define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i8: +define @test_vluxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv2i16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i16: +define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i16: +define @test_vluxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv4i32_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i16: +define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i16: +define @test_vluxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv4i32_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i8: +define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i8: +define @test_vluxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv4i32_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i32: +define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i32: +define @test_vluxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv4i32_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i16: +define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i16: +define @test_vluxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv4i32_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i8: +define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = 
extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i8: +define @test_vluxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv4i32_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i32: +define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i32: +define @test_vluxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32( %val, %val, 
%val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv4i32_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i16: +define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i16: +define @test_vluxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv4i32_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i8: +define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, 
m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i8: +define @test_vluxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv4i32_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i32: +define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i32: +define @test_vluxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: 
vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv16f16_nxv16i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i16: +define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16f16_nxv16i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i16: +define @test_vluxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv16f16_nxv16i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i8: +define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16f16_nxv16i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i8: +define @test_vluxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv16f16_nxv16i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i32: +define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16f16_nxv16i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i32: +define @test_vluxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v 
v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv4f64_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i16: +define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f64_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i16: +define @test_vluxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv4f64_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i8: +define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f64_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i8: +define @test_vluxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv4f64_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i32: +define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f64_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i32: +define @test_vluxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t +; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv1f64_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i8: +define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i8: +define @test_vluxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv1f64_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i32: +define @test_vluxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv1f64_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i16: +define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i16: +define @test_vluxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli 
zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv1f64_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i8: +define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i8: +define @test_vluxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv1f64_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i32: +define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, 
ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i32: +define @test_vluxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv1f64_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i16: +define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i16: +define @test_vluxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; 
CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv1f64_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i8: +define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i8: +define @test_vluxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv1f64_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i32: +define 
@test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i32: +define @test_vluxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv1f64_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i16: +define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i16: +define 
@test_vluxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv1f64_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i8: +define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i8: +define @test_vluxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv1f64_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i32: +define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i32: +define @test_vluxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv1f64_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i16: +define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i16: +define @test_vluxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv1f64_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i8: +define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i8: +define @test_vluxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: 
vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv1f64_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i32: +define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i32: +define @test_vluxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv1f64_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vluxseg6_nxv1f64_nxv1i16: +define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i16: +define @test_vluxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv1f64_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i8: +define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i8: +define @test_vluxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv1f64_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i32: +define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i32: +define @test_vluxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv1f64_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i16: +define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i16: +define @test_vluxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv1f64_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i8: +define 
@test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i8: +define @test_vluxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv1f64_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i32: +define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i32: +define @test_vluxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv1f64_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i16: +define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i16: +define @test_vluxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: 
vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2f32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i32: +define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i32: +define @test_vluxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2f32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i8: +define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: 
vluxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i8: +define @test_vluxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2f32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i16: +define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i16: +define @test_vluxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; 
CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2f32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i32: +define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i32: +define @test_vluxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2f32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i8: +define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: 
vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i8: +define @test_vluxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2f32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i16: +define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i16: +define @test_vluxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli 
zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2f32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i32: +define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vluxseg4_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i32: +define @test_vluxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2f32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i8: +define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; 
CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vluxseg4_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i8: +define @test_vluxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2f32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i16: +define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vluxseg4_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i16: +define @test_vluxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: 
vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv2f32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i32: +define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i32: +define @test_vluxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv2f32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i8: +define 
@test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i8: +define @test_vluxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv2f32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i16: +define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vluxseg5_mask_nxv2f32_nxv2i16: +define @test_vluxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv2f32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i32: +define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vluxseg6_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i32: +define @test_vluxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, 
%mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv2f32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i8: +define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vluxseg6_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i8: +define @test_vluxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv2f32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i16: +define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vluxseg6_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i16: +define @test_vluxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv2f32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i32: +define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i32: +define @test_vluxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv2f32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i8: +define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i8: +define @test_vluxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv2f32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vluxseg7_nxv2f32_nxv2i16: +define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i16: +define @test_vluxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv2f32_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i32: +define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } 
-define @test_vluxseg8_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i32: +define @test_vluxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv2f32_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i8: +define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vluxseg8_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i8: +define @test_vluxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv2f32_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i16: +define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vluxseg8_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i16: +define @test_vluxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv1f16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i8: +define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i8: +define @test_vluxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv1f16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i32: +define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i32: +define @test_vluxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; 
CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv1f16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i16: +define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i16: +define @test_vluxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv1f16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i8: +define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i8: +define @test_vluxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv1f16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i32: +define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i32: +define @test_vluxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv1f16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i16: +define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i16: +define @test_vluxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv1f16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i8: +define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; 
CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i8: +define @test_vluxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv1f16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i32: +define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i32: +define @test_vluxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, 
mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv1f16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i16: +define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i16: +define @test_vluxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv1f16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i8: +define 
@test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i8: +define @test_vluxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv1f16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i32: +define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f16_nxv1i32( 
%val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i32: +define @test_vluxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv1f16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i16: +define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i16: +define @test_vluxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv1f16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i8: +define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i8: +define @test_vluxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv1f16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i32: +define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; 
CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i32: +define @test_vluxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv1f16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i16: +define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i16: +define @test_vluxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv1f16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i8: +define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i8: +define @test_vluxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv1f16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i32: +define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i32: +define @test_vluxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv1f16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i16: +define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16( undef, undef, undef, 
undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i16: +define @test_vluxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv1f16_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i8: +define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i8: +define @test_vluxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv1f16_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i32: +define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i32: +define @test_vluxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 
1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv1f16_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i16: +define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i16: +define @test_vluxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv1f32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i8: +define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i8: +define @test_vluxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv1f32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i32: +define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i32: +define @test_vluxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret 
%1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv1f32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i16: +define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i16: +define @test_vluxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv1f32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i8: +define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i8: +define @test_vluxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv1f32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i32: +define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i32: +define @test_vluxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) 
- %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv1f32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i16: +define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i16: +define @test_vluxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv1f32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i8: +define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vluxseg4.nxv1f32.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i8: +define @test_vluxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv1f32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i32: +define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i32: +define @test_vluxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli 
zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv1f32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i16: +define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i16: +define @test_vluxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv1f32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i8: +define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i8: +define @test_vluxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv1f32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i32: +define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i32: +define @test_vluxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, 
%index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv1f32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i16: +define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i16: +define @test_vluxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv1f32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i8: +define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i8: +define @test_vluxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv1f32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i32: +define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} 
%0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i32: +define @test_vluxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv1f32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i16: +define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i16: +define @test_vluxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t 
-; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv1f32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i8: +define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i8: +define @test_vluxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv1f32_nxv1i32(ptr %base, %index, i32 
%vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i32: +define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i32: +define @test_vluxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv1f32_nxv1i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i16: +define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 6) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i16: +define @test_vluxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv1f32_nxv1i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i8: +define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i8: +define @test_vluxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; 
CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv1f32_nxv1i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i32: +define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i32: +define @test_vluxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv1f32_nxv1i16(ptr %base, 
%index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i16: +define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i16: +define @test_vluxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv8f16_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i16: +define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i16: +define @test_vluxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv8f16_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i8: +define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i8: +define @test_vluxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv8f16_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i32: +define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i32: +define @test_vluxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv8f16_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i16: +define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i16: +define @test_vluxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv8f16_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i8: +define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i8: +define @test_vluxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv8f16_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i32: +define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i32: +define @test_vluxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv8f16_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i16: +define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i16: +define @test_vluxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv8f16_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i8: +define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i8: +define @test_vluxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, 
%val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv8f16_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i32: +define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i32: +define @test_vluxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv8f32_nxv8i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i16: +define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8f32_nxv8i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i16: +define @test_vluxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv8f32_nxv8i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i8: +define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8f32_nxv8i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i8: +define @test_vluxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = 
extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv8f32_nxv8i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i32: +define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8f32_nxv8i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i32: +define @test_vluxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2f64_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i32: +define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32( undef, undef, 
ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i32: +define @test_vluxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2f64_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i8: +define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i8: +define @test_vluxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2f64_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i16: +define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i16: +define @test_vluxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2f64_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i32: +define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret 
%1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i32: +define @test_vluxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2f64_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i8: +define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i8: +define @test_vluxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, 
i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2f64_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i16: +define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i16: +define @test_vluxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2f64_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i32: +define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i32: +define @test_vluxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2f64_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i8: +define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i8: +define @test_vluxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: 
vluxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2f64_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i16: +define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i16: +define @test_vluxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv4f16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i16: +define 
@test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i16: +define @test_vluxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv4f16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i8: +define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i8: +define @test_vluxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv4f16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i32: +define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i32: +define @test_vluxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv4f16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i16: +define 
@test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i16: +define @test_vluxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv4f16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i8: +define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i8: +define @test_vluxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv4f16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i32: +define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i32: +define @test_vluxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv4f16_nxv4i16(ptr %base, %index, i32 
%vl) { -; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i16: +define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i16: +define @test_vluxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv4f16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i8: +define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vluxseg4_mask_nxv4f16_nxv4i8: +define @test_vluxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv4f16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i32: +define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i32: +define @test_vluxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv4f16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i16: +define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i16: +define @test_vluxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv4f16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i8: +define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", 
, 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i8: +define @test_vluxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv4f16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i32: +define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i32: +define @test_vluxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, 
ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv4f16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i16: +define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i16: +define @test_vluxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv4f16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i8: +define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, m1, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i8: +define @test_vluxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv4f16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i32: +define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i32: +define 
@test_vluxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv4f16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i16: +define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i16: +define @test_vluxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 
5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv4f16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i8: +define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i8: +define @test_vluxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv4f16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i32: +define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, 
a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i32: +define @test_vluxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv4f16_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i16: +define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i16: +define @test_vluxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv4f16_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i8: +define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i8: +define @test_vluxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 
5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv4f16_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i32: +define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i32: +define @test_vluxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2f16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i32: +define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: 
vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i32: +define @test_vluxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2f16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i8: +define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i8: +define @test_vluxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv2f16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i16: +define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i16: +define @test_vluxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2f16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i32: +define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i32: +define @test_vluxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2f16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i8: +define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i8: +define @test_vluxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 
; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv2f16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i16: +define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i16: +define @test_vluxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2f16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i32: +define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v 
v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i32: +define @test_vluxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2f16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i8: +define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i8: +define @test_vluxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv2f16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i16: +define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i16: +define @test_vluxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv2f16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i32: +define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i32: +define @test_vluxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv2f16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i8: +define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i8: +define @test_vluxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(,,,,, ptr, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(,,,,, ptr, , , i32, i32) -define @test_vluxseg5_nxv2f16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i16: +define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i16: +define @test_vluxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv2f16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i32: +define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i32: +define @test_vluxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv2f16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i8: +define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = 
extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i8: +define @test_vluxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(,,,,,, ptr, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(,,,,,, ptr, , , i32, i32) -define @test_vluxseg6_nxv2f16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i16: +define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i16: +define @test_vluxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: 
vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv2f16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i32: +define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i32: +define @test_vluxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv2f16_nxv2i8(ptr %base, %index, i32 
%vl) { -; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i8: +define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i8: +define @test_vluxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(,,,,,,, ptr, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(,,,,,,, ptr, , , i32, i32) -define @test_vluxseg7_nxv2f16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i16: +define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define 
@test_vluxseg7_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i16: +define @test_vluxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv2f16_nxv2i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i32: +define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i32: +define @test_vluxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv2f16_nxv2i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i8: +define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i8: +define @test_vluxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(,,,,,,,, ptr, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, ptr, , , i32, i32) -define @test_vluxseg8_nxv2f16_nxv2i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i16: +define 
@test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i16: +define @test_vluxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv4f32_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i16: +define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i16: +define @test_vluxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv4f32_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i8: +define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i8: +define @test_vluxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(,, ptr, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(,, ptr, , , i32, i32) -define @test_vluxseg2_nxv4f32_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i32: +define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32( undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i32: +define @test_vluxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv4f32_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i16: +define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i16: +define @test_vluxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv4f32_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i8: +define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i8: +define @test_vluxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(,,, ptr, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(,,, ptr, , , i32, i32) -define @test_vluxseg3_nxv4f32_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i32: +define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32( undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i32: +define @test_vluxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv4f32_nxv4i16(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i16: +define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16( undef, undef, undef, undef, ptr 
%base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i16: +define @test_vluxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv4f32_nxv4i8(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i8: +define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i8: +define @test_vluxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; 
CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(,,,, ptr, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32(,,,, ptr, , , i32, i32) -define @test_vluxseg4_nxv4f32_nxv4i32(ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i32: +define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i32 %vl, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i32: +define @test_vluxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i32 %vl, i32 1, i32 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } + diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll index 2bdfcdf0005fa..28f70ce08bfe1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll @@ -1,18178 +1,18284 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh \ +; RUN: llc 
-mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv16i16_nxv16i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i16: +define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16i16_nxv16i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i16: +define @test_vluxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv16i16_nxv16i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i8: +define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16i16_nxv16i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i8: +define @test_vluxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv16i16_nxv16i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i32: +define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16i16_nxv16i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i32: 
+define @test_vluxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i32_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i32: +define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i32: +define @test_vluxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i32_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i8: +define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i8: +define @test_vluxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i32_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i64: +define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i32_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i64: +define @test_vluxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i32_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i16: +define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i16: +define 
@test_vluxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv4i32_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i32: +define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i32: +define @test_vluxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, 
ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv4i32_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i8: +define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i8: +define @test_vluxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv4i32_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i64: +define 
@test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i32_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i64: +define @test_vluxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv4i32_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i16: +define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i16: +define @test_vluxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv4i32_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i32: +define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i32: +define @test_vluxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: 
vluxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv4i32_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i8: +define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i8: +define @test_vluxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64(,,,, ptr, , , i64, 
i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv4i32_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i64: +define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i32_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i64: +define @test_vluxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv4i32_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i16: +define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: 
vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i16: +define @test_vluxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv16i8_nxv16i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i16: +define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vluxseg2_mask_nxv16i8_nxv16i16: +define @test_vluxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv16i8_nxv16i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i8: +define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vluxseg2_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i8: +define @test_vluxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret 
%1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv16i8_nxv16i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i32: +define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vluxseg2_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i32: +define @test_vluxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv16i8_nxv16i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i16: +define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; 
CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -define @test_vluxseg3_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i16: +define @test_vluxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv16i8_nxv16i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i8: +define @test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i8: +define @test_vluxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1.nxv32i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv16i8_nxv16i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i32: +define @test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i32: +define @test_vluxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1.nxv32i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 
3) + %1 = call @llvm.riscv.tuple.extract.nxv32i8.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv16i8_nxv16i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i16: +define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i16: +define @test_vluxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv16i8_nxv16i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i8: +define 
@test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i8: +define @test_vluxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv16i8_nxv16i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i32: +define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i32: +define @test_vluxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv1i64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i64: +define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_nxv1i64: +define @test_vluxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8, v0.t 
+; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv1i64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i32: +define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_nxv1i32: +define @test_vluxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv1i64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i16: +define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_nxv1i16: +define @test_vluxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv1i64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i8: +define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_nxv1i8: +define @test_vluxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv1i64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i64: +define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i64: +define @test_vluxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: 
vluxseg3ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv1i64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i32: +define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i32: +define @test_vluxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv1i64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i16: +define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i16: +define @test_vluxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv1i64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i8: +define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} 
@llvm.riscv.vluxseg3.nxv1i64.nxv1i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i8: +define @test_vluxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv1i64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i64: +define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i64: +define @test_vluxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv1i64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i32: +define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i32: +define @test_vluxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, 
i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv1i64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i16: +define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i16: +define @test_vluxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv1i64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i8: +define 
@test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i8: +define @test_vluxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i64: +define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i64: +define @test_vluxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i32: +define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i32: +define @test_vluxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i16: +define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i16: +define @test_vluxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i8: +define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i8: +define @test_vluxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i64: +define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i64: +define @test_vluxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i32: +define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i32: +define @test_vluxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i16: +define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i16: 
+define @test_vluxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i8: +define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i8: +define @test_vluxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i64: +define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i64: +define @test_vluxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i32(,,,,,,, 
ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i32: +define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i32: +define @test_vluxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i16: +define 
@test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i16: +define @test_vluxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i8: +define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i8: +define @test_vluxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i64: +define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i64: +define @test_vluxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i32: +define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i32: +define @test_vluxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, 
(a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i16: +define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i16: +define @test_vluxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i8: +define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i8: +define @test_vluxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv1i32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i64: +define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i64: +define @test_vluxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv1i32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i32: +define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i32: +define @test_vluxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i16: +define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i16: +define @test_vluxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: 
ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv1i32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i8: +define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i8: +define @test_vluxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv1i32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i64: +define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i64: +define @test_vluxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv1i32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i32: +define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32( undef, 
undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i32: +define @test_vluxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i16: +define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i16: +define @test_vluxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv16i8.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv1i32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i8: +define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i8: +define @test_vluxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} 
@llvm.riscv.vluxseg4.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i16>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64, i64)

-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i64:
+define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 3)
+  %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %0, i32 1)
+  ret <vscale x 1 x i8> %1
 }

-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i64:
+define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) undef, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 3)
+  %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %0, i32 1)
+  ret <vscale x 1 x i8> %1
 }

-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i32>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64, i64)

-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i32:
+define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 3)
+  %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %0, i32 1)
+  ret <vscale x 1 x i8> %1
 }

-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i32:
+define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) undef, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 3)
+  %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %0, i32 1)
+  ret <vscale x 1 x i8> %1
 }

-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i64>, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 1 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64, i64)

-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i16:
+define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 3)
+  %1 = call <vscale x 1 x i8> @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %0, i32 1)
+  ret <vscale x 1 x i8> %1
 }

-define
@test_vluxseg4_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i16: +define @test_vluxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv1i32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i8: +define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i8: +define @test_vluxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i64: +define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i64: +define @test_vluxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i32: +define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i32: +define @test_vluxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i16: +define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, 
a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i16: +define @test_vluxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i8: +define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i8: +define 
@test_vluxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i64: +define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i64: +define @test_vluxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i32: +define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i32: +define @test_vluxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(,,,,,, 
ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i16: +define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i16: +define @test_vluxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i8: +define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i8: +define @test_vluxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i64: +define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i64: +define @test_vluxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i32: +define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i32: +define @test_vluxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i16: +define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i16: +define @test_vluxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i8: +define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i8: +define @test_vluxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} 
@llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i64: +define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i64: +define @test_vluxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i32: +define 
@test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i32: +define @test_vluxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i16: +define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, 
undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i16: +define @test_vluxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i8: +define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i8: +define 
@test_vluxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv8i16_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i16: +define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i16: +define @test_vluxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl, 
i64 1)
-  %1 = extractvalue {,} %0, 1
-  ret %1
+  %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3)
+  %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1)
+  ret %1
 }

-declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(,, ptr, , i64)
-declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(,, ptr, , , i64, i64)
+declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64)
+declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64)

-define @test_vluxseg2_nxv8i16_nxv8i8(ptr %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i8:
+define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8( undef, undef, ptr %base, %index, i64 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
+  %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3)
+  %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1)
+  ret %1
 }

-define @test_vluxseg2_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i8:
+define @test_vluxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1)
-  %1 = extractvalue {,} %0, 1
-  ret %1
+  %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3)
+  %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1)
+  ret %1
 }

-declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i64(,, ptr, , i64)
-declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64(,, ptr, , , i64, i64)
+declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64)
+declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64)

-define @test_vluxseg2_nxv8i16_nxv8i64(ptr %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i64:
+define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(ptr %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i64( undef, undef, ptr %base, %index, i64 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
+  %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3)
+  %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1)
+  ret %1
 }

-define @test_vluxseg2_mask_nxv8i16_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i64:
+define @test_vluxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(ptr %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1)
-  %1 = extractvalue {,} %0, 1
-  ret %1
+  %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3)
+  %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1)
+  ret %1
 }

-declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(,, ptr, , i64)
-declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(,, ptr, , , i64, i64)
+declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64)
+declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64)

-define @test_vluxseg2_nxv8i16_nxv8i32(ptr %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i32:
+define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32( undef, undef, ptr %base, %index, i64 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
+  %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3)
+  %1 = call 
@llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i32: +define @test_vluxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv8i16_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i16: +define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i16: +define @test_vluxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16( 
%val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv8i16_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i8: +define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i8: +define @test_vluxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv8i16_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i64: +define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8i16_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i64: +define @test_vluxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv8i16_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i32: +define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue 
{,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i32: +define @test_vluxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv8i16_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i16: +define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i16: +define @test_vluxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv8i16_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i8: +define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i8: +define @test_vluxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv8i16_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i64: +define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8i16_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i64: +define @test_vluxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv8i8.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv8i16_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i32: +define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i32: +define @test_vluxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i8_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i32: +define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i32: +define @test_vluxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i8_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i8: +define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i8: +define @test_vluxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i8_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i64: +define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i64: +define @test_vluxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i8_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i16: 
+define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i16: +define @test_vluxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv4i8_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i32: +define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } 
-define @test_vluxseg3_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i32: +define @test_vluxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv4i8_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i8: +define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i8: +define @test_vluxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv4i8_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i64: +define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i64: +define @test_vluxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv2i8.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv4i8_nxv4i16(ptr %base, %index, 
i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i16: +define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vluxseg3_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i16: +define @test_vluxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv4i8_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i32: +define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) 
ret %1 } -define @test_vluxseg4_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i32: +define @test_vluxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv4i8_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i8: +define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv4i8.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vluxseg4_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i8: +define @test_vluxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, ptr %base, %index, 
[The rest of this hunk continues the same mechanical rewrite for the remaining vluxseg test cases.
The old aggregate-returning @llvm.riscv.vluxsegN.* / @llvm.riscv.vluxsegN.mask.* declarations and their
@test_vluxsegN_* functions (which obtained results via extractvalue) are removed, and tuple-based tests are
added: vluxseg7/vluxseg8 over 7- and 8-field <vscale x N x i8> tuples (tuple_nxv1i8/2i8/4i8/8i8) and vluxseg2
over the 2-field tuples used for the nxv1i16 and nxv2i16 result types, each with i8/i16/i32/i64 index vectors.
Every added test calls the @llvm.riscv.vluxsegN.triscv.vector.tuple_* intrinsic with an undef tuple passthru,
ptr, index, vl, and a log2(SEW) operand (the masked forms additionally pass the mask and a policy operand),
then extracts field 1 with @llvm.riscv.tuple.extract, as in the sketch after this hunk. The regenerated CHECK
lines drop the chains of vmv1r.v merge copies and the "ta, mu" vsetvli of the old masked tests; each test now
emits a single vsetvli (ta, ma), one vluxsegNeiXX.v, and one vmv1r.v moving the extracted segment into v8.]
tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i64: +define @test_vluxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv1i16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i32: +define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i32: +define @test_vluxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv1i16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i16: +define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i16: +define @test_vluxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv1i16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i8: +define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i8: +define @test_vluxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) 
-define @test_vluxseg5_nxv1i16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i64: +define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i64: +define @test_vluxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i32: +define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, ptr %base, 
%index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i32: +define @test_vluxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i16: +define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i16: +define @test_vluxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i8: +define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i8: +define @test_vluxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = 
extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i64: +define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i64: +define @test_vluxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i32: +define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i32: +define @test_vluxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i16: +define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i16: +define @test_vluxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i8: +define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, 
%index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i8: +define @test_vluxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i64: +define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i64: +define @test_vluxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 
-; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i32: +define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i32: +define @test_vluxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, 
%val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i16: +define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vluxseg7_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i16: +define @test_vluxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i8: +define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vluxseg7_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i8: +define @test_vluxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i64: +define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: 
vluxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -define @test_vluxseg8_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i64: +define @test_vluxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i32: +define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, 
i32 1) ret %1 } -define @test_vluxseg8_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i32: +define @test_vluxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i16: +define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i16: +define @test_vluxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
+ %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4)
+ %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1)
+ ret %1
}
-declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(,,,,,,,, ptr, , i64)
-declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, ptr, , , i64, i64)
+declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64)
+declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64)
-define @test_vluxseg8_nxv1i16_nxv1i8(ptr %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i8:
+define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
+ %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4)
+ %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1)
+ ret %1
}
-define @test_vluxseg8_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i8:
+define @test_vluxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vmv1r.v v14, v8
-; CHECK-NEXT: vmv1r.v v15, v8
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
+
%0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i32: +define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i32: +define @test_vluxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i8: 
+define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i8: +define @test_vluxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i16: +define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } 
-define @test_vluxseg2_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i16: +define @test_vluxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i64: +define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i64: +define @test_vluxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i32: +define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i32: +define @test_vluxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i32_nxv2i8(ptr 
%base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i8: +define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i8: +define @test_vluxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i16: +define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i16: +define @test_vluxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i64: +define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i64: +define @test_vluxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t +; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i32: +define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i32: +define @test_vluxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} 
@llvm.riscv.vluxseg4.nxv2i32.nxv2i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i8: +define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i8: +define @test_vluxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i16: +define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i16: +define @test_vluxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i64: +define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i64: +define @test_vluxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv2i32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i32: +define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i32: +define @test_vluxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv2i32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i8: +define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i8: +define @test_vluxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} 
@llvm.riscv.vluxseg5.nxv2i32.nxv2i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv2i32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i16: +define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i16: +define @test_vluxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv2i32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i64: +define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i64: +define @test_vluxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv2i32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i32: +define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, 
%index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i32: +define @test_vluxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv2i32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i8: +define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i8: +define @test_vluxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv2i32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i16: +define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i16: +define @test_vluxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv2i32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i64: +define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i64: +define @test_vluxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv2i32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i32: +define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i32: +define @test_vluxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv2i32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i8: +define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i8: +define @test_vluxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv2i32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i16: +define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i16: +define @test_vluxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv2i32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i64: +define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i64: +define @test_vluxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: 
vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv2i32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i32: +define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i32: +define @test_vluxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32( %val, %val, 
%val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv2i32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i8: +define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i8: +define @test_vluxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8i16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} 
@llvm.riscv.vluxseg8.nxv2i32.nxv2i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv2i32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i16: +define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i16: +define @test_vluxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv2i32_nxv2i64(ptr %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i64: +define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i64: +define @test_vluxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv8i8_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i16: +define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vluxseg2.nxv8i8.nxv8i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i16: +define @test_vluxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv8i8_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i8: +define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i8: +define @test_vluxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: 
vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv8i8_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i64: +define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i64: +define @test_vluxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", 
, 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv8i8_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i32: +define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i32: +define @test_vluxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv8i8_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i16: +define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16( undef, undef, undef, ptr %base, %index, i64 %vl) - 
%1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i16: +define @test_vluxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv8i8_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i8: +define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i8: +define @test_vluxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; 
CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv8i8_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i64: +define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i64: +define @test_vluxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(,,, ptr, , , i64, i64) +declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv8i8_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i32: +define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i32: +define @test_vluxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv8i8_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i16: +define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, 
v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i16: +define @test_vluxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv8i8_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i8: +define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i8: +define @test_vluxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv8i8_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i64: +define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i64: +define @test_vluxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv8i8_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i32: +define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i32: +define @test_vluxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define 
@test_vluxseg5_nxv8i8_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i16: +define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i16: +define @test_vluxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv8i8_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i8: +define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i8: +define @test_vluxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv8i8_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i64: +define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i64: +define @test_vluxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv8i8_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i32: +define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i32: +define @test_vluxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, 
%index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv8i8_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i16: +define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i16: +define @test_vluxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), 
ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv8i8_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i8: +define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i8: +define @test_vluxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv8i8_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i64: +define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i64( 
undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i64: +define @test_vluxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv8i8_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i32: +define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i32: +define @test_vluxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv8i8_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i16: +define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i16: +define @test_vluxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, 
%val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv8i8_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i8: +define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i8: +define @test_vluxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i64(,,,,,,, ptr, , i64) -declare {,,,,,,} 
@llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv8i8_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i64: +define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i64: +define @test_vluxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv8i8_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i32: +define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i32: +define @test_vluxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv8i8_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i16: +define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i16: +define @test_vluxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv8i8_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i8: +define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i8: +define @test_vluxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vluxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv8i8_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i64: +define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i64: +define @test_vluxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: 
vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv8i8_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i32: +define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i32: +define @test_vluxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i64_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i32: +define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i64_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_nxv4i32: +define @test_vluxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i64_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i8: +define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i64_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_nxv4i8: +define @test_vluxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i64_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i64: +define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i64_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_nxv4i64: +define 
@test_vluxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i64_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i16: +define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i64_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_nxv4i16: +define @test_vluxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i32: +define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vluxseg2_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i32: +define @test_vluxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i8: +define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 
-; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vluxseg2_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i8: +define @test_vluxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i64: +define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i64: +define @test_vluxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv4i16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i16: +define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i16: +define @test_vluxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(,,, ptr, , i64) -declare {,,} 
@llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv4i16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i32: +define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i32: +define @test_vluxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv4i16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i8: +define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, 
mf4, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i8: +define @test_vluxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1i16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv4i16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i64: +define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i64: +define @test_vluxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl, 
%mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv4i16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i16: +define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i16: +define @test_vluxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv4i16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i32: +define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i32: +define @test_vluxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv4i16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i8: +define 
@test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i8: +define @test_vluxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2i16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv4i16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i64: +define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -define @test_vluxseg4_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i64: +define @test_vluxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv4i16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i16: +define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -define @test_vluxseg4_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i16: +define @test_vluxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail 
call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv4i16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i32: +define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -define @test_vluxseg5_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i32: +define @test_vluxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv4i16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i8: +define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -define @test_vluxseg5_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i8: +define @test_vluxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4i16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv4i16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i64: +define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, 
a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i64: +define @test_vluxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv4i16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i16: +define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i16: +define 
@test_vluxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv4i16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i32: +define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i32: +define @test_vluxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 
; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv4i16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i8: +define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i8: +define @test_vluxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i64(,,,,,, ptr, , i64) -declare {,,,,,} 
@llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv4i16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i64: +define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i64: +define @test_vluxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv4i16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i16: +define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i16: +define @test_vluxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv4i16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i32: +define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i32: +define @test_vluxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv4i16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i8: +define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i8: +define @test_vluxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv4i16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i64: +define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i64: +define @test_vluxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv4i16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i16: +define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i16: +define @test_vluxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} 
@llvm.riscv.vluxseg8.nxv4i16.nxv4i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv4i16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i32: +define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i32: +define @test_vluxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define 
@test_vluxseg8_nxv4i16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i8: +define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i8: +define @test_vluxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv4i16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i64: +define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v 
v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i64: +define @test_vluxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv4i16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i16: +define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i16: +define @test_vluxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv1i8_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i64: +define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i64: +define @test_vluxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg2ei64.v 
v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv1i8_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i32: +define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i32: +define @test_vluxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv1i8_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i16: +define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i16: +define @test_vluxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv1i8_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i8: +define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = 
extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i8: +define @test_vluxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv1i8_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i64: +define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i64: +define @test_vluxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli 
zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv1i8_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i32: +define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i32: +define @test_vluxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv1i8_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i16: +define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i16: +define @test_vluxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv1i8_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i8: +define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} 
@llvm.riscv.vluxseg3.nxv1i8.nxv1i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i8: +define @test_vluxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv1i8_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i64: +define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i64: +define @test_vluxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv1i8_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i32: +define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i32: +define @test_vluxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = 
call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv1i8_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i16: +define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i16: +define @test_vluxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv1i8_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i8: +define 
@test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i8: +define @test_vluxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i8_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i64: +define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i64: +define @test_vluxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i8_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i32: +define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i32: +define @test_vluxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i8_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i16: +define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i16: +define @test_vluxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv1i8_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i8: +define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i8: +define @test_vluxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i8_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i64: +define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i64: +define @test_vluxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i8_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i32: +define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, 
v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i32: +define @test_vluxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i8_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i16: +define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i16: +define 
@test_vluxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv1i8_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i8: +define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i8: +define @test_vluxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i8_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i64: +define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i64: +define @test_vluxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(,,,,,,, ptr, , 
i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i8_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i32: +define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i32: +define @test_vluxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i8_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i16: +define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i16: +define @test_vluxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv1i8_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i8: +define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - 
ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i8: +define @test_vluxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i8_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i64: +define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i64: +define @test_vluxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i8_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i32: +define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i32: +define @test_vluxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, 
v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i8_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i16: +define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i16: +define @test_vluxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, 
i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv1i8_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i8: +define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i8: +define @test_vluxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i8_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i32: +define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i32: +define @test_vluxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i8_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i8: +define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i8: +define @test_vluxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i8_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i16: +define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i16: +define @test_vluxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i8_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i64: +define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i64: +define @test_vluxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i8_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i32: +define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i32: +define @test_vluxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i8_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i8: +define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 
- ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i8: +define @test_vluxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i8_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i16: +define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i16: +define @test_vluxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, 
(a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i8_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i64: +define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i64: +define @test_vluxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32(,,,, ptr, , , i64, i64) +declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i8_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i32: +define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i32: +define @test_vluxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i8_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i8: +define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i8: +define @test_vluxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i8_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i16: +define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i16: +define 
@test_vluxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i8_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i64: +define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i64: +define @test_vluxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = 
extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv2i8_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i32: +define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i32: +define @test_vluxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv2i8_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i8: +define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i8: +define @test_vluxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv2i8_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i16: +define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 
= tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i16: +define @test_vluxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv2i8_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i64: +define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i64: +define 
@test_vluxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv2i8_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i32: +define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i32: +define @test_vluxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail 
call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv2i8_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i8: +define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i8: +define @test_vluxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16(,,,,,, ptr, , , i64, 
i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv2i8_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i16: +define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i16: +define @test_vluxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv2i8_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i64: +define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i64: +define @test_vluxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv2i8_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i32: +define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i32: +define @test_vluxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv2i8_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i8: +define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i8: +define @test_vluxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv2i8_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i16: +define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i16: +define @test_vluxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv2i8_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i64: +define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i64: +define @test_vluxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv2i8_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i32: +define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i32: +define @test_vluxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv2i8_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i8: +define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i8: +define @test_vluxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv2i8_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i16: +define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i16: +define @test_vluxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv2i8_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i64: +define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i64: +define @test_vluxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vluxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv8i32_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i16: +define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i32_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i16: +define @test_vluxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 
8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv8i32_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i8: +define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i32_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i8: +define @test_vluxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv8i32_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i64: +define 
@test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i32_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i64: +define @test_vluxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv8i32_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i32: +define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 
2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8i32_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i32: +define @test_vluxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv32i8_nxv32i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i16: +define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv32i8_nxv32i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv32i16: +define @test_vluxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv32i8_nxv32i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i8: +define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv32i8_nxv32i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv32i8: +define @test_vluxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i32: +define 
@test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i32: +define @test_vluxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i8: +define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call 
@llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i8: +define @test_vluxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i16: +define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i16: +define @test_vluxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) 
- %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i64: +define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i64: +define @test_vluxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i16_nxv2i32(ptr %base, %index, i64 
%vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i32: +define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i32: +define @test_vluxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i8: +define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i8: +define @test_vluxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i16: +define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i16: +define @test_vluxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v 
v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i64: +define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i64: +define @test_vluxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4i64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32(,,,, ptr, , , 
i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i32: +define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i32: +define @test_vluxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i8: +define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli 
zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i8: +define @test_vluxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i16: +define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i16: +define 
@test_vluxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i64: +define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i64: +define @test_vluxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - 
%1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv2i16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i32: +define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i32: +define @test_vluxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 
3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv2i16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i8: +define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i8: +define @test_vluxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv2i16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i16: +define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, 
a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i16: +define @test_vluxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64(,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) -define @test_vluxseg5_nxv2i16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i64: +define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2i16_nxv2i64( %val, 
ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i64: +define @test_vluxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv2i16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i32: +define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i32: +define @test_vluxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: 
vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv2i16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i8: +define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i8: +define @test_vluxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} 
@llvm.riscv.vluxseg6.nxv2i16.nxv2i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv2i16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i16: +define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i16: +define @test_vluxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64(,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg6_nxv2i16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i64: +define 
@test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i64: +define @test_vluxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv2i16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i32: +define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i32: +define @test_vluxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv2i16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i8: +define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i8: +define 
@test_vluxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv2i16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i16: +define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i16: +define @test_vluxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, 
v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64(,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) -define @test_vluxseg7_nxv2i16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i64: +define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i64: +define @test_vluxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, 
i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2i64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv2i16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i32: +define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i32: +define @test_vluxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv2i16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i8: +define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i8: +define @test_vluxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv2i16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i16: +define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i16: +define @test_vluxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64(,,,,,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) -define @test_vluxseg8_nxv2i16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i64: +define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, 
i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i64: +define @test_vluxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i64_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i32: +define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i64_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_nxv2i32: +define @test_vluxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: 
vluxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i64_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i8: +define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i64_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_nxv2i8: +define @test_vluxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i64_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i16: +define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i64_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_nxv2i16: +define @test_vluxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) -define @test_vluxseg2_nxv2i64_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i64: +define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2i64_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_nxv2i64: +define @test_vluxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i64_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i32: +define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i64_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i32: +define @test_vluxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v 
v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i64_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i8: +define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i64_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i8: +define @test_vluxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} 
@llvm.riscv.vluxseg3.nxv2i64.nxv2i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i64_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i16: +define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i64_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i16: +define @test_vluxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64(,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) -define @test_vluxseg3_nxv2i64_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i64: +define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2i64_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i64: +define @test_vluxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i64_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i32: +define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i64_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; 
CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i32: +define @test_vluxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i64_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i8: +define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i64_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i8: +define @test_vluxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i64_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i16: +define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i64_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i16: +define @test_vluxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64(,,,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) -define @test_vluxseg4_nxv2i64_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i64: +define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2i64_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i64: +define @test_vluxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1i64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv16f16_nxv16i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i16: +define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16f16_nxv16i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i16: +define @test_vluxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv16f16_nxv16i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i8: +define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16f16_nxv16i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i8: +define @test_vluxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv16f16_nxv16i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i32: +define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv16f16_nxv16i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i32: +define @test_vluxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv4f64_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i32: +define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f64_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i32: +define @test_vluxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv4f64_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i8: +define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f64_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i8: +define @test_vluxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = 
call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
<vscale x 8 x half> @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %0, i32 1)
+  ret <vscale x 8 x half> %1
+}
+
+
+define <vscale x 1 x half> @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4)
+  %1 = call <vscale x 1 x half> @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vluxseg4ei8.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 1 x half> @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x half> %1
+}
+
+
+define <vscale x 1 x half> @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4)
+  %1 = call <vscale x 1 x half> @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vluxseg4ei16.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 1 x half> @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x half> %1
+}
+
+
+define <vscale x 1 x half> @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vluxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4)
+  %1 = call <vscale x 1 x half> @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vluxseg4ei32.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 1 x half> @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x half> %1
+}
+
+
+define <vscale x 1 x half> @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vluxseg4ei64.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4)
+  %1 = call <vscale x 1 x half> @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vluxseg4ei64.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) undef, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 1 x half> @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %0, i32 1)
+  ret <vscale x 1 x half> %1
+}
+
+
+define <vscale x 2 x half> @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4)
+  %1 = call <vscale x 2 x half> @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %0, i32 1)
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vluxseg4ei8.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) undef, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 2 x half>
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8f16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1f16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv4f64_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i64: +define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f64_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i64: +define @test_vluxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv4f64_nxv4i16(ptr %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i16: +define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f64_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i16: +define @test_vluxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2f16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv1f64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i64: +define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i64: +define @test_vluxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv1f64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i32: +define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i32: +define @test_vluxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv1f64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i16: +define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vluxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4f16.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i16: +define @test_vluxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv1f64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i8: +define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define 
@test_vluxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i8: +define @test_vluxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv1f64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i64: +define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i64: +define @test_vluxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv1f64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i32: +define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: 
vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, 
e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i32: +define @test_vluxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv1f64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i16: +define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i16: +define @test_vluxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: 
ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv1f64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i8: +define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i8: +define @test_vluxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv1f64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i64: +define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli 
zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i64: +define @test_vluxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv1f64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i32: +define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i32: +define @test_vluxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: 
vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv8f32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv1f64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i16: +define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i16: +define @test_vluxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv1f64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vluxseg4_nxv1f64_nxv1i8: +define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i8: +define @test_vluxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv1f64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i64: +define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vluxseg5_mask_nxv1f64_nxv1i64: +define @test_vluxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv1f64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i32: +define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i32: +define @test_vluxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 
3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv1f64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i16: +define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i16: +define @test_vluxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv1f64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i8: +define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue 
{,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i8: +define @test_vluxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv1f64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i64: +define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i64: +define @test_vluxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv1f64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i32: +define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i32: +define @test_vluxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv1f64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i16: +define 
@test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i16: +define @test_vluxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv1f64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i8: +define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) 
+ ret %1 } -define @test_vluxseg6_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i8: +define @test_vluxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv1f64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i64: +define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i64: +define @test_vluxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv1f64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i32: +define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i32: +define @test_vluxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv1f64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i16: +define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 
%vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i16: +define @test_vluxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv1f64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i8: +define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { 
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i8: +define @test_vluxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv1f64_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i64: +define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i64: +define @test_vluxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, 
%val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv1f64_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i32: +define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i32: +define @test_vluxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv1f64_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i16: +define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i16: +define @test_vluxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv1f64_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i8: +define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f64_nxv1i8( 
%val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i8: +define @test_vluxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv2f32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i32: +define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) ret %1 } -define @test_vluxseg2_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i32: +define @test_vluxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv2f32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i8: +define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) ret %1 } -define @test_vluxseg2_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i8: +define @test_vluxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv2f32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i16: +define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + 
ret %1 } -define @test_vluxseg2_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i16: +define @test_vluxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv2f32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i64: +define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i64: +define @test_vluxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(,,, 
ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv2f32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i32: +define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i32: +define @test_vluxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv2f32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i8: +define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, 
i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i8: +define @test_vluxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv4f32.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv2f32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i16: +define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i16: +define @test_vluxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv2f32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i64: +define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i64: +define @test_vluxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv2f32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i32: +define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, 
i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i32: +define @test_vluxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv2f32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i8: +define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i8: +define @test_vluxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail 
call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv2f32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i16: +define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vluxseg4_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i16: +define @test_vluxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv2f32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i64: +define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} 
%0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vluxseg4_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i64: +define @test_vluxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv2f32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i32: +define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vluxseg5_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i32: +define @test_vluxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, 
ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv2f32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i8: +define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -define @test_vluxseg5_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i8: +define @test_vluxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv2f32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i16: +define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i16: +define @test_vluxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv2f32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i64: +define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i64: +define @test_vluxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; 
CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv2f32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i32: +define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i32: +define @test_vluxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8(,,,,,, ptr, , , i64, i64) -define 
@test_vluxseg6_nxv2f32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i8: +define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i8: +define @test_vluxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv2f32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i16: +define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define 
@test_vluxseg6_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i16: +define @test_vluxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv2f32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i64: +define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vluxseg6_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i64: +define @test_vluxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv2f32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i32: +define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vluxseg7_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i32: +define @test_vluxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv2f32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i8: +define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -define @test_vluxseg7_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i8: +define @test_vluxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv2f32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i16: +define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i16: +define @test_vluxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv2f32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i64: +define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i64: +define @test_vluxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv2f32_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i32: +define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i32: +define @test_vluxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv2f32_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i8: +define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vluxseg8.nxv2f32.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i8: +define @test_vluxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv2f32_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i16: +define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vluxseg8_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i16: +define @test_vluxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv2f32_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i64: +define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -define @test_vluxseg8_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i64: +define @test_vluxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv1f16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i64: +define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i64: +define @test_vluxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv1f16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i32: +define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i32: +define @test_vluxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv1f16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i16: +define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i16: +define @test_vluxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + 
ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv1f16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i8: +define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i8: +define @test_vluxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv1f16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i64: +define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 
%vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i64: +define @test_vluxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv1f16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i32: +define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i32: +define @test_vluxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1f32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} 
@llvm.riscv.vluxseg3.nxv1f16.nxv1i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv1f16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i16: +define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i16: +define @test_vluxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv1f16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i8: +define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define 
@test_vluxseg3_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i8: +define @test_vluxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv1f16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i64: +define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i64: +define @test_vluxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call 
@llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv1f16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i32: +define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i32: +define @test_vluxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv2f32.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv1f16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i16: +define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i16: +define @test_vluxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv1f16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i8: +define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i8: +define @test_vluxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv1f16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i64: +define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i64: +define @test_vluxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv1f16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i32: +define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: 
vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i32: +define @test_vluxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv1f16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i16: +define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i16: +define @test_vluxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv1f16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i8: +define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i8: +define @test_vluxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare 
{,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv1f16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i64: +define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i64: +define @test_vluxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv1f16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i32: +define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i32: +define @test_vluxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv1f16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i16: +define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i16: +define @test_vluxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), 
v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv1f16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i8: +define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i8: +define @test_vluxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv1f16_nxv1i64(ptr %base, %index, 
i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i64: +define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i64: +define @test_vluxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv1f16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i32: +define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, 
%index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i32: +define @test_vluxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv4f64.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv1f16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i16: +define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i16: +define @test_vluxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: 
vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv1f16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i8: +define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i8: +define @test_vluxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv1f16_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i64: +define 
@test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i64: +define @test_vluxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv1f16_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i32: +define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call 
@llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i32: +define @test_vluxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv1f16_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i16: +define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i16: +define @test_vluxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv1f16_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i8: +define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i8: +define @test_vluxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i64(,, ptr, , i64) -declare {,} 
@llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv1f32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i64: +define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i64: +define @test_vluxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv1f32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i32: +define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; 
CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i32: +define @test_vluxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv1f32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i16: +define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i16: +define @test_vluxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv1f32_nxv1i8(ptr %base, %index, i64 
%vl) { -; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i8: +define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i8: +define @test_vluxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv1f32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i64: +define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i64: +define @test_vluxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv1f32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i32: +define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i32: +define @test_vluxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv1f32_nxv1i16(ptr %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i16: +define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i16: +define @test_vluxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv1f32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i8: +define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i8: +define 
@test_vluxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv1f32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i64: +define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i64: +define @test_vluxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", 
, 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv1f32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i32: +define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i32: +define @test_vluxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv2f64.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv1f32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i16: +define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, 
ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i16: +define @test_vluxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv1f32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i8: +define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i8: +define @test_vluxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 
1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv1f32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i64: +define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i64: +define @test_vluxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv1f32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i32: +define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: 
vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i32: +define @test_vluxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv1f32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i16: +define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i16: +define @test_vluxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, 
v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv1f32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i8: +define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i8: +define @test_vluxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv1f32_nxv1i64(ptr %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i64: +define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i64: +define @test_vluxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv1f32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i32: +define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 
} -define @test_vluxseg6_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i32: +define @test_vluxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv1f32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i16: +define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i16: +define @test_vluxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = 
extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv1f32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i8: +define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i8: +define @test_vluxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv1f32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i64: +define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, 
e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i64: +define @test_vluxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv1f32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i32: +define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i32: +define @test_vluxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv1f32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i16: +define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i16: +define @test_vluxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv1f32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i8: +define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i8: +define @test_vluxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv1f32_nxv1i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i64: +define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: 
vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i64: +define @test_vluxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv1f32_nxv1i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i32: +define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i32: +define @test_vluxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 6) + %1 = call @llvm.riscv.tuple.extract.nxv1f64.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv1f32_nxv1i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i16: +define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i16: +define @test_vluxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv1f32_nxv1i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i8: +define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i8: +define @test_vluxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv8f16_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i16: +define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v10, 
(a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i16: +define @test_vluxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv8f16_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i8: +define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i8: +define @test_vluxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg2ei64.v 
v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv8f16_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i64: +define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8f16_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i64: +define @test_vluxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv8f16_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i32: +define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; 
CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i32: +define @test_vluxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv8f16_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i16: +define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i16: +define @test_vluxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), 
v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv8f16_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i8: +define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i8: +define @test_vluxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv8f16_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i64: +define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8 -; 
CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8f16_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i64: +define @test_vluxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv8f16_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i32: +define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i32: +define @test_vluxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t 
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv8f16_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i16: +define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i16: +define @test_vluxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv8f16_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i8: +define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i8: +define @test_vluxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv8f16_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i64: +define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8f16_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i64: +define @test_vluxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vluxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv8f16_nxv8i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i32: +define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i32: +define @test_vluxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(,, ptr, , , i64, i64) 
-define @test_vluxseg2_nxv8f32_nxv8i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i16: +define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8f32_nxv8i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i16: +define @test_vluxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv8f32_nxv8i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i8: +define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8f32_nxv8i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i8: +define 
@test_vluxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv8f32_nxv8i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i64: +define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8f32_nxv8i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i64: +define @test_vluxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv8f32_nxv8i32(ptr %base, 
%index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i32: +define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv8f32_nxv8i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i32: +define @test_vluxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv2f64_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i32: +define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i32: +define 
@test_vluxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1.nxv16i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv16bf16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv2f64_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i8: +define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i8: +define @test_vluxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv2f64_nxv2i16(ptr %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i16: +define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i16: +define @test_vluxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv2f64_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i64: +define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f64_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i64: +define @test_vluxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv2f64_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i32: +define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i32: +define @test_vluxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv2f64_nxv2i8(ptr %base, %index, i64 
%vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i8: +define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i8: +define @test_vluxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv2f64_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i16: +define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i16: 
+define @test_vluxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv2f64_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i64: +define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f64_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i64: +define @test_vluxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} 
@llvm.riscv.vluxseg4.nxv2f64.nxv2i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv2f64_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i32: +define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i32: +define @test_vluxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv2f64_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i8: +define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, 
%index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i8: +define @test_vluxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv2f64_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i16: +define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i16: +define @test_vluxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, 
i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv2f64_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i64: +define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f64_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i64: +define @test_vluxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv4f16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i32: +define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: 
vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i32: +define @test_vluxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv4f16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i8: +define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i8: +define @test_vluxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8( %val, %val, 
ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv4f16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i64: +define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i64: +define @test_vluxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv4f16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i16: +define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vluxseg2.nxv4f16.nxv4i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i16: +define @test_vluxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv4f16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i32: +define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i32: +define @test_vluxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32( %val, 
%val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv4f16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i8: +define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i8: +define @test_vluxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv4f16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i64: +define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: 
- %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i64: +define @test_vluxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv4f16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i16: +define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i16: +define @test_vluxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv4f16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i32: +define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i32: +define @test_vluxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv4f16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i8: +define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: 
vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i8: +define @test_vluxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv4f16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i64: +define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i64: +define @test_vluxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, 
ta, mu -; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv4f16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i16: +define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i16: +define @test_vluxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv4f16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i32: +define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i32: +define @test_vluxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv4f16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i8: +define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i8: +define @test_vluxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv4f16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i64: +define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i64: +define @test_vluxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare 
{,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv4f16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i16: +define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i16: +define @test_vluxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv4f16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i32: +define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i32: +define @test_vluxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv4f16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i8: +define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i8: +define @test_vluxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv4f16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i64: +define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i64: +define @test_vluxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv4f16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i16: +define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i16: +define @test_vluxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv4f16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i32: +define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vluxseg7_mask_nxv4f16_nxv4i32: +define @test_vluxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.nxv8i64(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv8bf16.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv4f16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i8: +define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i8: +define @test_vluxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue 
{,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv4f16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i64: +define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i64: +define @test_vluxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv4f16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i16: +define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, m1, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i16: +define @test_vluxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv4f16_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i32: +define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i32: +define 
@test_vluxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv4f16_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i8: +define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i8: +define @test_vluxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, 
%index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv4f16_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i64: +define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i64: +define @test_vluxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv4f16_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i16: +define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i16: +define @test_vluxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv2f16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i32: +define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vluxseg2_mask_nxv2f16_nxv2i32: +define @test_vluxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv2f16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i8: +define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i8: +define @test_vluxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv2f16_nxv2i16(ptr %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i16: +define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i16: +define @test_vluxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv2f16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i64: +define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i64: +define @test_vluxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vluxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv2f16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i32: +define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; 
CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i32: +define @test_vluxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv2f16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i8: +define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i8: +define @test_vluxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} 
@llvm.riscv.vluxseg3.nxv2f16.nxv2i16(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv2f16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i16: +define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i16: +define @test_vluxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i64(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv2f16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i64: +define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i64( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i64: +define @test_vluxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv2f16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i32: +define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i32: +define @test_vluxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 6) undef, ptr 
%base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv2f16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i8: +define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i8: +define @test_vluxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv2f16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i16: +define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", 
, 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i16: +define @test_vluxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i64(,,,, ptr, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64(,,,, ptr, , , i64, i64) -define @test_vluxseg4_nxv2f16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i64: +define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i64( undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg4_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i64: +define @test_vluxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,} %0, 1 - ret %1 + %0 = tail call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv2f16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i32: +define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i32: +define @test_vluxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv2f16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i8: +define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i8: +define @test_vluxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv2f16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i16: +define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i16: +define @test_vluxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: 
vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i64(,,,,, ptr, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64(,,,,, ptr, , , i64, i64) -define @test_vluxseg5_nxv2f16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i64: +define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i64( undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -define @test_vluxseg5_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i64: +define @test_vluxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv2f16_nxv2i32(ptr %base, %index, i64 %vl) 
{ -; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i32: +define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i32: +define @test_vluxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv2f16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i8: +define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret 
%1 } -define @test_vluxseg6_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i8: +define @test_vluxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv2f16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i16: +define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i16: +define @test_vluxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 
%vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i64(,,,,,, ptr, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64(,,,,,, ptr, , , i64, i64) -define @test_vluxseg6_nxv2f16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i64: +define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i64( undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg6_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i64: +define @test_vluxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv2f16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i32: +define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; 
CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i32: +define @test_vluxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv2f16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i8: +define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i8: +define @test_vluxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv2f16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i16: +define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i16: +define @test_vluxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i64(,,,,,,, ptr, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64(,,,,,,, ptr, , , i64, i64) -define @test_vluxseg7_nxv2f16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i64: +define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i64( undef, undef, undef, undef, undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg7_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i64: +define @test_vluxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1.nxv2i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv2f16_nxv2i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i32: +define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i32: +define @test_vluxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv2f16_nxv2i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i8: +define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i8: +define @test_vluxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; 
CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv2f16_nxv2i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i16: +define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i16: +define @test_vluxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call 
@llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i64(,,,,,,,, ptr, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64(,,,,,,,, ptr, , , i64, i64) -define @test_vluxseg8_nxv2f16_nxv2i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i64: +define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -define @test_vluxseg8_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i64: +define @test_vluxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1.nxv4i64(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv4bf16.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv4f32_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i32: +define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i32: +define @test_vluxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv4f32_nxv4i8(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i8: +define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i8: +define @test_vluxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = 
extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i64(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv4f32_nxv4i64(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i64: +define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i64( undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f32_nxv4i64( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i64: +define @test_vluxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(,, ptr, , , i64, i64) -define @test_vluxseg2_nxv4f32_nxv4i16(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i16: +define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16( undef, undef, ptr %base, %index, i64 
%vl) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg2_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i16: +define @test_vluxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1.nxv1i64(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv1bf16.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(,,, ptr, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(,,, ptr, , , i64, i64) -define @test_vluxseg3_nxv4f32_nxv4i32(ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i32: +define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32( undef, undef, undef, ptr %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 4) + %1 = call @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 } -define @test_vluxseg3_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i32: +define @test_vluxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl, i64 1) - %1 = 
extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) undef, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 2 x bfloat> @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %0, i32 1)
+  ret <vscale x 2 x bfloat> %1
 }

-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i8>, i64)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)

-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i8:
+define <vscale x 2 x bfloat> @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4)
+  %1 = call <vscale x 2 x bfloat> @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %0, i32 1)
+  ret <vscale x 2 x bfloat> %1
 }

-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i8:
+define <vscale x 2 x bfloat> @test_vluxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vluxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) undef, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1, i64 4)
+  %1 = call <vscale x 2 x bfloat> @llvm.riscv.tuple.extract.nxv2bf16.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %0, i32 1)
+  ret <vscale x 2 x bfloat> %1
 }

[... the remaining hunks of this file follow the same pattern: the struct-returning @llvm.riscv.vluxseg3 / @llvm.riscv.vluxseg4 nxv4f32 declarations and tests are removed, and vluxseg8 tests for <vscale x 2 x bfloat> and <vscale x 4 x bfloat> tuple types with ei8/ei16/ei32/ei64 index types are added in their place ...]
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll
index 90ded94e9d8cd..c24895a0e6380 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll
@@ -1,13163 +1,11880 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh \
+; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s

-declare void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i16>, i32)
-declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i8>, i32, i32)
+declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)

-define void @test_vsoxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16:
+define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v16
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl)
+  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 3)
   ret void
 }

-define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i16:
+define void @test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
+  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
   ret void
 }

[... the remaining autogenerated vsoxseg2 and vsoxseg3 declarations and tests in this hunk are updated the same way (element types nxv1i8 through nxv32i8, index types ei8/ei16/ei32): the multi-operand @llvm.riscv.vsoxseg* intrinsics are replaced by the tuple-based @llvm.riscv.vsoxseg*.triscv.vector.tuple_* form with an additional SEW operand, and the register-shuffling vmv*r.v CHECK lines disappear ...]

-define void @test_vsoxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i32:
+define void @test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v11, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call void
@llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i16: +define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg8_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i16: +define void @test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv16i8_nxv16i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vsoxseg2_nxv16i8_nxv16i16: +define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg2_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i16: +define void @test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv16i8_nxv16i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i8: +define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg2_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i8: +define void @test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 
-; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv16i8_nxv16i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i32: +define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg2_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i32: +define void @test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv16i8_nxv16i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i16: +define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, 
v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg3_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i16: +define void @test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv16i8_nxv16i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i8: +define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg3_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i8: +define void @test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret 
entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv16i8_nxv16i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i32: +define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg3_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i32: +define void @test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv16i8_nxv16i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i16: +define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12 +; CHECK-NEXT: 
vsoxseg3ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg4_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i16: +define void @test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv16i8_nxv16i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i8: +define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg4_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i8: +define void @test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv16i8_nxv16i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i32: +define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg4_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i32: +define void @test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i32: +define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, 
(a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg2_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i32: +define void @test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i8: +define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg2_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i8: +define void @test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } 
-declare void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i16: +define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg2_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i16: +define void @test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i32: +define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg3_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i32: +define void @test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i8: +define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg3_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i8: +define void @test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void 
@llvm.riscv.vsoxseg3.nxv2i32.nxv2i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i16: +define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg3_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i16: +define void @test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i32: +define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, 
i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg4_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i32: +define void @test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i8: +define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg4_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i8: +define void @test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i16: +define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg4_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i16: +define void @test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i32: +define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli 
zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg5_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i32: +define void @test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i8: +define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg5_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i8: +define void @test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli 
zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i16: +define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg5_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i16: +define void @test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vsoxseg6_nxv2i32_nxv2i32: +define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg6_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i32: +define void @test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i8: +define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 
3) ret void } -define void @test_vsoxseg6_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i8: +define void @test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i16: +define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg6_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i16: +define void @test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + 
tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i32: +define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg7_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i32: +define void @test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i8: +define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg7_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i8: +define void @test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i16: +define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg7_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; 
CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i16: +define void @test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i32: +define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg8_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i32: +define void @test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32( %val, 
%val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i8: +define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg8_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i8: +define void @test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i16: +define void 
@test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg8_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i16: +define void @test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i16: +define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg2_mask_nxv4i16_nxv4i16( %val, ptr %base, 
%index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i16: +define void @test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i8: +define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg2_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i8: +define void @test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i32: +define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg2_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i32: +define void @test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i16: +define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg3_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i16: +define void @test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, 
%index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i8: +define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg3_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i8: +define void @test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv4i16_nxv4i32( %val, ptr 
%base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i32: +define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg3_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i32: +define void @test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i16: +define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg4_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i16: +define void @test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, 
%index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i8: +define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg4_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i8: +define void @test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i32: +define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg4_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i32: +define void @test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i16: +define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 
%vl, i32 3) ret void } -define void @test_vsoxseg5_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i16: +define void @test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i8: +define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg5_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i8: +define void @test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i32: +define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg5_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i32: +define void @test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i16: +define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg6_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i16: +define void @test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i8: +define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg6_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i8: +define void @test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i32: +define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg6_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i32: +define void @test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16(,,,,,,, ptr, , , i32) +declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i16: +define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg7_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i16: +define void @test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i8: +define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; 
CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg7_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i8: +define void @test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i32: +define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg7_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i32: +define void @test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i16: +define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg8_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i16: +define void @test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8(,,,,,,,, ptr, , i32) -declare void 
@llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i8: +define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg8_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i8: +define void @test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i32: +define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, 
v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg8_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i32: +define void @test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i8: +define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg2_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i8: +define void @test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vsoxseg7ei16.v v8, (a0), v15, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
   ret void
 }
 
-declare void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i32)
-declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i32>, i32, i32)
+declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
 
-define void @test_vsoxseg2_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i32:
+define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vsoxseg7ei32.v v8, (a0), v15
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
+  tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 3)
   ret void
 }
 
-define void @test_vsoxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i32:
+define void @test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vsoxseg7ei32.v v8, (a0), v15, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
   ret void
 }
 
-declare void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32>, <vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i32)
-declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>, <vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i8>, i32, i32)
+declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
 
-define void @test_vsoxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i16:
+define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsoxseg7ei8.v v8, (a0), v15
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
+  tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 3)
   ret void
 }
 
-define void @test_vsoxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i16:
+define void @test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsoxseg7ei8.v v8, (a0), v15, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
   ret void
 }
 
-declare void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i32)
-declare void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i16>, i32, i32)
+declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
 
-define void @test_vsoxseg3_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i8:
+define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsoxseg7ei16.v v8, (a0), v15
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
+  tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 3)
   ret void
 }
 
-define void @test_vsoxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i8:
+define void @test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vsetvli
zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i32: +define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg3_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i32: +define void @test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i16: +define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg3_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i16: +define void @test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i8: +define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg4_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i8: +define void @test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v 
v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i32: +define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg4_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i32: +define void @test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i16: +define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) 
{ +; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg4_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i16: +define void @test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i8: +define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg5_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i8: +define void @test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i32: +define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg5_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i32: +define void @test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , 
i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i16: +define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg5_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i16: +define void @test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i8: +define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail 
call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg6_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i8: +define void @test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i32: +define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg6_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i32: +define void @test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: 
ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i16: +define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg6_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i16: +define void @test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i8: +define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) 
%val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg7_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i8: +define void @test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i32: +define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void 
@test_vsoxseg7_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i32: +define void @test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i16: +define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg7_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i16: +define void @test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16( 
%val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i8: +define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg8_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i8: +define void @test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i32: +define void 
@test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsoxseg8_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i32: +define void @test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i16: +define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, 
%index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i16: +define void @test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv8i16_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i16: +define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i16: +define void @test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv8i16_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i8: +define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i8: +define void @test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv8i16_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i32: +define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32( %val, %val, ptr %base, %index, 
i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i32: +define void @test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv8i16_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i16: +define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i16: +define void @test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8(,,, ptr, , 
i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv8i16_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i8: +define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i8: +define void @test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv8i16_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i32: +define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i32: +define void @test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv8i16_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i16: +define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i16: +define void @test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, 
ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv8i16_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i8: +define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i8: +define void @test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv8i16_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i32: +define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12 +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret 
entry: - tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i32: +define void @test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i16: +define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i16: +define void @test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void 
@llvm.riscv.vsoxseg2.nxv8i8.nxv8i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i8: +define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i8: +define void @test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i32: +define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void 
@test_vsoxseg2_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i32: +define void @test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i16: +define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i16: +define void @test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8(,,, ptr, , , i32) +declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i8: +define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i8: +define void @test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i32: +define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void 
@test_vsoxseg3_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i32: +define void @test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i16: +define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i16: +define void @test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8(,,,, ptr, , , i32) +declare 
void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i8: +define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i8: +define void @test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i32: +define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i32: +define void @test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i16: +define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i16: +define void @test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i8: +define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i8: +define void @test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i32: +define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: 
vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i32: +define void @test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i16: +define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i16: +define void @test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; 
CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i8: +define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i8: +define void @test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i32: +define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i32: +define void @test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i16: +define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg7.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i16: +define void @test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i8: +define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i8: +define void @test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v 
v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i32: +define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i32: +define void @test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i16: +define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i16: +define void @test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i8: +define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: 
vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i8: +define void @test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i32: +define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i32: +define void @test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv8i32_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i16: +define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv8i32_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i16: +define void @test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void 
@test_vsoxseg2_nxv8i32_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i8: +define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv8i32_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i8: +define void @test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv8i32_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i32: +define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv8i32_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i32: +define void @test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i16: +define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i16: +define void @test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i8: +define void 
@test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i8: +define void @test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i32: +define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i32: +define void @test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i16: +define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i16: +define void @test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i8: +define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i8: +define void @test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i32: +define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i32: +define void @test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, 
mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i16: +define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i16: +define void @test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i8: +define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i8: +define void @test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i32: +define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i32: +define void @test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, 
v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i16: +define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i16: +define void @test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void 
@test_vsoxseg5_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i8: +define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i8: +define void @test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i32: +define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv4i8_nxv4i32( 
%val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i32: +define void @test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i16: +define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i16: +define void @test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i8: +define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i8: +define void @test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i32: +define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i32: +define void @test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i16: +define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i16: +define void @test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 
%vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i8: +define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i8: +define void @test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void 
@llvm.riscv.vsoxseg7.nxv4i8.nxv4i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i32: +define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i32: +define void @test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i16: +define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 
-; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i16: +define void @test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i8: +define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i8: +define void 
@test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i32: +define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i32: +define void @test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, 
%val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i8: +define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i8: +define void @test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i32: +define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret 
entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i32: +define void @test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i16: +define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i16: +define void @test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8(,,, 
ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i8: +define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i8: +define void @test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i32: +define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vsoxseg3_mask_nxv1i16_nxv1i32: +define void @test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i16: +define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i16: +define void @test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i8: +define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i8: +define void @test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i32: +define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define 
void @test_vsoxseg4_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i32: +define void @test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i16: +define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i16: +define void @test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8(,,,,, ptr, , i32) 
-declare void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i8: +define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i8: +define void @test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i32: +define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: 
vsoxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i32: +define void @test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i16: +define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i16: +define void @test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i8: +define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i8: +define void @test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i32: +define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i32: +define void @test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i16: +define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i16: +define void @test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i8: +define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i8: +define void @test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32(,,,,,,, ptr, , i32) 
-declare void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i32: +define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i32: +define void @test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i16: +define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; 
CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i16: +define void @test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i8: +define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i8: +define void @test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, 
%mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i32: +define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i32: +define void @test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i16: +define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg8_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i16: +define void @test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv32i8_nxv32i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i16: +define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, 
ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg2_mask_nxv32i8_nxv32i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i16: +define void @test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv32i8_nxv32i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i8: +define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg2_mask_nxv32i8_nxv32i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i8: +define void @test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, 
(a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i32: +define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg2_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i32: +define void @test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i8: +define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, 
a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg2_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i8: +define void @test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i16: +define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg2_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i16: +define void @test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr 
%base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i32: +define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg3_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i32: +define void @test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i8: +define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8( %val, %val, 
%val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg3_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i8: +define void @test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i16: +define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg3_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i16: +define void @test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, 
i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i32: +define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg4_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i32: +define void @test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i8: +define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg4_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i8: +define void @test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i16: +define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg4_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i16: +define void @test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16( %val, 
%val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i32: +define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg5_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i32: +define void @test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i8: +define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg5_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i8: +define void @test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i16: +define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg5_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i16: +define void @test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 
-; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i32: +define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg6_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i32: +define void @test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i8: +define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg6_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i8: +define void @test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i16: +define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16( %val, %val, %val, %val, 
%val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg6_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i16: +define void @test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i32: +define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg7_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i32: +define void @test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; 
CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i8: +define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg7_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i8: +define void @test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void 
@test_vsoxseg7_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i16: +define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg7_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i16: +define void @test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i32: +define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32( %val, %val, 
%val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg8_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i32: +define void @test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i8: +define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg8_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i8: +define void @test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; 
CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i16: +define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg8_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i16: +define void @test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32(,, ptr, , , i32) +declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i32: +define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg2_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i32: +define void @test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i8: +define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg2_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vsoxseg2_mask_nxv2i16_nxv2i8: +define void @test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i16: +define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg2_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i16: +define void @test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, 
, , i32, i32) -define void @test_vsoxseg3_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i32: +define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg3_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i32: +define void @test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i8: +define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg3_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i8: +define void 
@test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i16: +define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg3_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i16: +define void @test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i32: +define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg4_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i32: +define void @test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i8: +define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void 
@test_vsoxseg4_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i8: +define void @test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i16: +define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg4_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i16: +define void @test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32(,,,,, ptr, , 
i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i32: +define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg5_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i32: +define void @test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i8: +define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: 
vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg5_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i8: +define void @test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i16: +define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg5_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i16: +define void @test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, 
ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i32: +define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg6_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i32: +define void @test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i8: +define void 
@test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg6_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i8: +define void @test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i16: +define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void 
@test_vsoxseg6_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i16: +define void @test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i32: +define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg7_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i32: +define void @test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, 
%val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i8: +define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg7_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i8: +define void @test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i16: +define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, 
ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg7_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i16: +define void @test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i32: +define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) ret 
void } -define void @test_vsoxseg8_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i32: +define void @test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i8: +define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg8_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i8: +define void @test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 
+; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl)
+ tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5)
 ret void
 }
-declare void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16(,,,,,,,, ptr, , i32)
-declare void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, ptr, , , i32)
+declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32)
+declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32)
-define void @test_vsoxseg8_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i16:
+define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vmv1r.v v14, v8
-; CHECK-NEXT: vmv1r.v v15, v8
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl)
+ tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5)
 ret void
 }
-define void @test_vsoxseg8_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i16:
+define void @test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) {
+; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vmv1r.v v14, v8
-; CHECK-NEXT: vmv1r.v v15, v8
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl)
+ tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5)
 ret void
 }
-declare void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16(,, ptr, , i32)
-declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16(,, ptr, , , i32)
+declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32)
+declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32)
-define void @test_vsoxseg2_nxv4i32_nxv4i16( %val, ptr %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i16:
+define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv2r.v v10, v8
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16( %val, %val, ptr %base, %index, i32 %vl)
+ tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5)
 ret void
 }
-define void @test_vsoxseg2_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i16:
+define void @test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) {
+; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv2r.v v10, v8
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl)
+ tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5)
 ret void
 }
-declare void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8(,, ptr, , i32)
-declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8(,, ptr, , , i32)
+declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32)
+declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32)
-define void @test_vsoxseg2_nxv4i32_nxv4i8( %val, ptr %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i8:
+define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) {
+; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv2r.v v10, v8
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8( %val, %val, ptr %base, %index, i32 %vl)
+ tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5)
 ret void
 }
-define void @test_vsoxseg2_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i8:
+define void @test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) {
+; CHECK-LABEL:
test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv4i32_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i32: +define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg2_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i32: +define void @test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv4i32_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i16: +define void 
@test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg3_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i16: +define void @test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv4i32_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i8: +define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg3_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i8: +define void @test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv4i32_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i32: +define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg3_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i32: +define void @test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv4i32_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i16: +define void 
@test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg4_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i16: +define void @test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv4i32_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i8: +define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg4_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i8: +define void @test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, 
i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv4i32_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i32: +define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsoxseg4_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i32: +define void @test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv16f16_nxv16i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i16: +define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg2_mask_nxv16f16_nxv16i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i16: +define void @test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv16f16_nxv16i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i8: +define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg2_mask_nxv16f16_nxv16i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i8: +define void 
@test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv16f16_nxv16i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i32: +define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg2_mask_nxv16f16_nxv16i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i32: +define void @test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv4f64_nxv4i16( %val, ptr %base, %index, i32 %vl) 
{ -; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i16: +define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg2_mask_nxv4f64_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i16: +define void @test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv4f64_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i8: +define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg2_mask_nxv4f64_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i8: +define void @test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv4f64_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i32: +define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg2_mask_nxv4f64_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i32: +define void @test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i8: +define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg2_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i8: +define void @test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i32: +define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg2_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i32: +define void @test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v 
v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i16: +define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg2_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i16: +define void @test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i8: +define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; 
CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg3_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i8: +define void @test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i32: +define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg3_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i32: +define void @test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr 
%base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i16: +define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg3_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i16: +define void @test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i8: +define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg4_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i8: +define void @test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsoxseg4_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i32: +define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg4_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i32: +define void @test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
   ret void
 }

-declare void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i32)
-declare void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i32>, i32, i32)
+declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)

-define void @test_vsoxseg4_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i16:
+define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16(<vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
+  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 6)
   ret void
 }

-define void @test_vsoxseg4_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i16:
+define void @test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14, v0.t
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
   ret void
 }

-declare void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i32)
-declare void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i8>, i32, i32)
+declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)

-define void @test_vsoxseg5_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i8:
+define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vmv1r.v v14, v8
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8(<vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
+  tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
   ret void
 }

-define void @test_vsoxseg5_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i8:
+define void @test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vmv1r.v v14, v8
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
   ret void
 }

-declare void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i32)
-declare void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i16>, i32, i32)
+declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)

-define void @test_vsoxseg5_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i32:
+define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vmv1r.v v14, v8
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32(<vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
+  tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
   ret void
 }

-define void @test_vsoxseg5_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
-; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i32:
+define void @test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vmv1r.v v14, v8
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg5_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i16: +define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg5_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i16: +define void @test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i8: +define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg6_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i8: +define void @test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i32: +define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg6_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i32: +define void @test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, 
%mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsoxseg6_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i16: +define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg6_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i16: +define void @test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8(,,,,,,, ptr, , i32) -declare void 
@llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i8: +define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg7_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i8: +define void @test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i32: +define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v 
v10, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg7_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i32: +define void @test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsoxseg7_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i16: +define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg7_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i16: +define void @test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: 
vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i8: +define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg8_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i8: +define void @test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) 
-define void @test_vsoxseg8_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i32: +define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg8_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i32: +define void @test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsoxseg8_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i16: +define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, 
i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg8_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i16: +define void @test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i32: +define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg2_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i32: +define void @test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, 
ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i8: +define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg2_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i8: +define void @test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16(,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsoxseg2_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i16: +define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg2_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i16: +define void @test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i32: +define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg3_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i32: +define void @test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8(,,, ptr, , i32) 
-declare void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i8: +define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg3_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i8: +define void @test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsoxseg3_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i16: +define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsoxseg3_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i16: +define void @test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i32: +define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i32: +define void @test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 
%vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i8: +define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i8: +define void @test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i16: +define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i16: +define void @test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 
-; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i32: +define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i32: +define void @test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i8: +define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, 
ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i8: +define void @test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i16: +define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i16: +define void @test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16( %val, %val, 
%val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i32: +define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i32: +define void @test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i8: +define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8( %val, %val, %val, %val, 
%val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i8: +define void @test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i16: +define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i16: +define void @test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32(,,,,,,, ptr, , , i32) -define void @test_vsoxseg7_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i32: +define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i32: +define void @test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8(,,,,,,, ptr, , , i32) -define void @test_vsoxseg7_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i8: +define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg7.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i8: +define void @test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16(,,,,,,, ptr, , , i32) -define void @test_vsoxseg7_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i16: +define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i16: +define void @test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i32: +define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 -; CHECK-NEXT: ret +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i32: +define void @test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i8: +define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v 
v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i8: +define void @test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i16: +define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i16: +define void @test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8(,, ptr, , , i32) -define void @test_vsoxseg2_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i8: +define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i8: +define void @test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32(,, ptr, , , i32) -define void @test_vsoxseg2_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i32: +define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) 
%val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i32: +define void @test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16(,, ptr, , , i32) -define void @test_vsoxseg2_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i16: +define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i16: +define void @test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i8: +define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, 
mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i8: +define void @test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i32: +define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i32: +define void @test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void 
} -declare void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i16: +define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i16: +define void @test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i8: +define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i8: +define void @test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i32: +define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i32: +define void @test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i16: +define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v 
v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i16: +define void @test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i8: +define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i8: +define void @test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8( %val, 
%val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i32: +define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i32: +define void @test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i16: +define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i16: +define void @test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i8: +define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i8: +define void @test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32(,,,,,, ptr, , i32) -declare void 
@llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i32: +define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i32: +define void @test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i16: +define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i16: +define void @test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", 
, 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8(,,,,,,, ptr, , , i32) -define void @test_vsoxseg7_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i8: +define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i8: +define void @test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32(,,,,,,, ptr, , , i32) -define void @test_vsoxseg7_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i32: +define void 
@test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i32: +define void @test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16(,,,,,,, ptr, , , i32) -define void @test_vsoxseg7_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i16: +define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i16: +define void 
@test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i8: +define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i8: +define void @test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32(,,,,,,,, ptr, , i32) 
-declare void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i32: +define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i32: +define void @test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i16: +define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i16: +define void @test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8(,, ptr, , , i32) -define void @test_vsoxseg2_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i8: +define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i8: +define void @test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32(,, ptr, , , i32) -define void @test_vsoxseg2_nxv1f32_nxv1i32( 
%val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i32: +define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i32: +define void @test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16(,, ptr, , , i32) -define void @test_vsoxseg2_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i16: +define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i16: +define void @test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, 
(a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i8: +define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i8: +define void @test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i32: +define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void 
@test_vsoxseg3_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i32: +define void @test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i16: +define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i16: +define void @test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i8: +define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i8: +define void @test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i32: +define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i32: +define void @test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; 
CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i16: +define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i16: +define void @test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i8: +define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail 
call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i8: +define void @test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i32: +define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i32: +define void @test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void 
@llvm.riscv.vsoxseg5.nxv1f32.nxv1i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i16: +define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i16: +define void @test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i8: +define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vsoxseg6_mask_nxv1f32_nxv1i8: +define void @test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i32: +define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i32: +define void @test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv1f32_nxv1i16( %val, ptr 
%base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i16: +define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i16: +define void @test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8(,,,,,,, ptr, , , i32) -define void @test_vsoxseg7_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i8: +define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i8: +define void 
@test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32(,,,,,,, ptr, , , i32) -define void @test_vsoxseg7_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i32: +define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i32: +define void @test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16(,,,,,,, 
ptr, , , i32) -define void @test_vsoxseg7_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i16: +define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i16: +define void @test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i8: +define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define 
void @test_vsoxseg8_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i8: +define void @test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i32: +define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i32: +define void @test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i16: +define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i16: +define void @test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16(,, ptr, , , i32) -define void @test_vsoxseg2_nxv8f16_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i16: +define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i16: +define void @test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8(,, ptr, , , i32) -define void @test_vsoxseg2_nxv8f16_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i8: +define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i8: +define void @test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32(,, ptr, , , i32) -define void @test_vsoxseg2_nxv8f16_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i32: +define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 
%vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i32: +define void @test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv8f16_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i16: +define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i16: +define void @test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv8f16_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i8: +define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i8: +define void @test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv8f16_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i32: +define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i32: +define void 
@test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv8f16_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i16: +define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i16: +define void @test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv8f16_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i8: +define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i8: +define void @test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv8f16_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i32: +define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i32: +define void @test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call 
void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16(,, ptr, , , i32) -define void @test_vsoxseg2_nxv8f32_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i16: +define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv8f32_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i16: +define void @test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void 
@test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + 
+define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void 
@test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void 
@test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void 
@test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void 
@test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void 
@test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + 
+define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void 
@test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void 
@test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define 
void @test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8(,, ptr, , , i32) -define void @test_vsoxseg2_nxv8f32_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i8: +define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret 
entry: - tail call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv8f32_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i8: +define void @test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32(,, ptr, , , i32) -define void @test_vsoxseg2_nxv8f32_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i32: +define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv8f32_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i32: +define void @test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32(,, ptr, , , i32) -define void @test_vsoxseg2_nxv2f64_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i32: +define void 
@test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i32: +define void @test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8(,, ptr, , , i32) -define void @test_vsoxseg2_nxv2f64_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i8: +define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i8: +define void @test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8( %val, 
%val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16(,, ptr, , , i32) -define void @test_vsoxseg2_nxv2f64_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i16: +define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i16: +define void @test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv2f64_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i32: +define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i32: +define void 
@test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv2f64_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i8: +define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i8: +define void @test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv2f64_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i16: +define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; 
CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i16: +define void @test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv2f64_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i32: +define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i32: +define void @test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv2f64_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i8: +define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i8: +define void @test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv2f64_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i16: +define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, %mask, i32 
%vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i16: +define void @test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16(,, ptr, , , i32) -define void @test_vsoxseg2_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i16: +define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i16: +define void @test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8(,, ptr, , , i32) -define void @test_vsoxseg2_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i8: +define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; 
CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i8: +define void @test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32(,, ptr, , , i32) -define void @test_vsoxseg2_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i32: +define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i32: +define void @test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16(,,, ptr, , i32) -declare void 
@llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i16: +define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i16: +define void @test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i8: +define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i8: +define void @test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i32: +define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i32: +define void @test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i16: +define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call 
void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i16: +define void @test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i8: +define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i8: +define void @test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32(,,,, ptr, , 
i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i32: +define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i32: +define void @test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i16: +define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i16: +define void @test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i8: +define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i8: +define void @test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i32: +define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i32: +define void @test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i16: +define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i16: +define void @test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i8: +define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i8: +define void @test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i32: +define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - 
tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i32: +define void @test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16(,,,,,,, ptr, , , i32) -define void @test_vsoxseg7_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i16: +define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i16: +define void @test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8(,,,,,,, ptr, , , i32) -define void @test_vsoxseg7_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i8: +define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i8: +define void @test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32(,,,,,,, ptr, , , i32) -define void @test_vsoxseg7_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i32: +define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i32: +define void @test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i16: +define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i16: +define void @test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, 
ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i8: +define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i8: +define void @test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i32: +define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i32: +define void @test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32(,, ptr, , , i32) -define void @test_vsoxseg2_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i32: +define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i32: +define void @test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8(,, ptr, , , i32) -define void @test_vsoxseg2_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i8: +define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i8: +define void @test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16(,, ptr, , , i32) -define void @test_vsoxseg2_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i16: +define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, 
%mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i16: +define void @test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i32: +define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i32: +define void @test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i8: +define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i8: +define void @test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i16: +define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i16: +define void @test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i32: +define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i32: +define void @test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i8: +define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i8: +define void 
@test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i16: +define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i16: +define void @test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i32: +define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i32: +define void @test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i8: +define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i8: +define void @test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, 
v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16(,,,,, ptr, , , i32) -define void @test_vsoxseg5_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i16: +define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg5_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i16: +define void @test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i32: +define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg6.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i32: +define void @test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i8: +define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i8: +define void @test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call 
void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16(,,,,,, ptr, , , i32) -define void @test_vsoxseg6_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i16: +define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg6_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i16: +define void @test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32(,,,,,,, ptr, , , i32) -define void @test_vsoxseg7_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i32: +define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i32: +define void @test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8(,,,,,,, ptr, , , i32) -define void @test_vsoxseg7_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i8: +define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i8: +define void @test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) 
ret void } -declare void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16(,,,,,,, ptr, , , i32) -define void @test_vsoxseg7_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i16: +define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg7_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i16: +define void @test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i32: +define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) 
%val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i32: +define void @test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i8: +define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i8: +define void @test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) 
+ tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, ptr, , , i32) -define void @test_vsoxseg8_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i16: +define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg8_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i16: +define void @test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16(,, ptr, , , i32) -define void @test_vsoxseg2_nxv4f32_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i16: +define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16( %val, %val, ptr %base, %index, i32 %vl) 
+ tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i16: +define void @test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8(,, ptr, , , i32) -define void @test_vsoxseg2_nxv4f32_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i8: +define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i8: +define void @test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32(,, ptr, , i32) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32(,, ptr, , , i32) -define void @test_vsoxseg2_nxv4f32_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i32: +define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, 
ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg2_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i32: +define void @test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv4f32_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i16: +define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i16: +define void @test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, ptr %base, 
%index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv4f32_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i8: +define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i8: +define void @test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32(,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32(,,, ptr, , , i32) -define void @test_vsoxseg3_nxv4f32_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i32: +define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg3_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i32: +define void 
@test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv4f32_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i16: +define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i16: +define void @test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv4f32_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i8: +define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i8: +define void @test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32(,,,, ptr, , , i32) -define void @test_vsoxseg4_nxv4f32_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i32: +define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsoxseg4_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i32: +define void @test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - 
tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
   ret void
 }
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll
index 1128eb2c44b9a..c8e7c43754058 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll
@@ -1,18796 +1,15676 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh \
+; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s

-declare void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i64)
+declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i8>, i64, i64)
+declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)

-define void @test_vsoxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16:
+define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v16
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl)
+  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 3)
   ret void
 }

-define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i16:
+define void @test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
   ret void
 }

-declare void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i64)
+declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i16>, i64, i64)
+declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)

-define void
@test_vsoxseg2_nxv16i16_nxv16i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i8: +define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg2_mask_nxv16i16_nxv16i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i8: +define void @test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv16i16_nxv16i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i32: +define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg2_mask_nxv16i16_nxv16i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i32: +define void @test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i32_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i32: +define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg2_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i32: +define void @test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i32_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i8: +define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg2_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i8: +define void @test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i32_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i64: +define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg2_mask_nxv4i32_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i64: +define void @test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i32_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i16: +define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg2_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i16: +define void @test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv4i32_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i32: +define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg3_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i32: +define void @test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv4i32_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i8: +define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg3_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i8: +define void @test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call 
void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv4i32_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i64: +define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg3_mask_nxv4i32_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i64: +define void @test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv4i32_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i16: +define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; 
CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg3_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i16: +define void @test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv4i32_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i32: +define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg4_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i32: +define void @test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32( 
%val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv4i32_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i8: +define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg4_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i8: +define void @test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv4i32_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i64: +define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: 
vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg4_mask_nxv4i32_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i64: +define void @test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv4i32_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i16: +define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg4_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i16: +define void @test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, 
(a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv16i8_nxv16i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i16: +define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg2_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i16: +define void @test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv16i8_nxv16i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i8: +define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, 
m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg2_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i8: +define void @test_vsoxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv16i8_nxv16i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i32: +define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg2_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i32: +define void @test_vsoxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16(,,, ptr, , , i64) +declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv16i8_nxv16i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i16: +define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg3_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i16: +define void @test_vsoxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv16i8_nxv16i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i8: +define void @test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg3_mask_nxv16i8_nxv16i8( %val, ptr %base, 
%index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i8: +define void @test_vsoxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv16i8_nxv16i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i32: +define void @test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg3_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i32: +define void @test_vsoxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) 
+declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv16i8_nxv16i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i16: +define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg4_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i16: +define void @test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv16i8_nxv16i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i8: +define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) ret void } 
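For reference, a fully typed sketch of the tuple-based store intrinsic exercised above follows (a reading aid, not part of the diff): the scalable element types are inferred from the mangled intrinsic name, the @example_vsoxseg3 wrapper is purely illustrative, and the trailing i64 3 constant is the log2(SEW) operand, i.e. log2(8) for these i8-element tuples.

; Sketch with assumed types reconstructed from the intrinsic name:
declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i16>, i64, i64)

define void @example_vsoxseg3(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
entry:
  ; store a 3-member tuple of <vscale x 1 x i8> segments through %index; i64 3 = log2(SEW) for e8
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 3)
  ret void
}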
-define void @test_vsoxseg4_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i8: +define void @test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv16i8_nxv16i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i32: +define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg4_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i32: +define void @test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64(,, ptr, , i64) -declare void 
@llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i64: +define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg2_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i64: +define void @test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i32: +define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void 
@test_vsoxseg2_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i32: +define void @test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i16: +define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg2_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i16: +define void @test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i8: +define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsoxseg2_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i8: +define void @test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i64: +define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, 
mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 
%vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, 
i64 3) + ret void +} + +define void @test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void 
@test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4) %val, 
ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, 
%index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 
5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void 
@test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr 
%base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, 
%index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, 
%mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void 
@test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, 
, , i64, i64) + +define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail 
call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void 
@test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 
8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) + +define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) + ret void +} + +declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) + +define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) + +define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) + ret void +} + +define void @test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) + +define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) + ret void +} + 
+define void @test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) + +define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i64: +define void @test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i32: +define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i32: +define void @test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i16: +define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i16: +define void @test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i8: +define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i8: +define void @test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i64: +define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, 
e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i64: +define void @test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i32: +define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i32: +define void @test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - 
tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i16: +define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i16: +define void @test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i8: +define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i8: +define void @test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i64: +define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i64: +define void @test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i32: +define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i32: +define void @test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void 
@test_vsoxseg5_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i16: +define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i16: +define void @test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i8: +define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void 
@test_vsoxseg5_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i8: +define void @test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i64: +define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i64: +define void @test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i32: +define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i32: +define void @test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i16: +define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i16: +define void @test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i8: +define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i8: +define void @test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 
3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i64: +define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i64: +define void @test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare 
void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i32: +define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i32: +define void @test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i16: +define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i16: +define void @test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i8: +define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i8: +define void @test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i64: +define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i64: +define void @test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i32: +define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i32: +define void @test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i16: +define void 
@test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i16: +define void @test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i8: +define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, 
i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i8: +define void @test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i64: +define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i64: +define void @test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i32: +define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i32: +define void @test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i16: +define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg2.nxv1i32.nxv1i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i16: +define void @test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i8: +define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i8: +define void @test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void 
@llvm.riscv.vsoxseg3.nxv1i32.nxv1i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i64: +define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i64: +define void @test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i32: +define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32( %val, %val, %val, ptr %base, %index, i64 %vl) 
+ tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i32: +define void @test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i16: +define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i16: +define void @test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) 
ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i8: +define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i8: +define void @test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i64: +define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64( %val, %val, 
%val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i64: +define void @test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i32: +define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i32: +define void @test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i16: +define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i16: +define void @test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i8: +define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i8: +define void @test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i64: +define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i64: +define void @test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: 
vsoxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i32: +define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i32: +define void @test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i16: +define void 
@test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i16: +define void @test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i8: +define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i8: +define void 
@test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i64: +define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i64: +define void @test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void 
@llvm.riscv.vsoxseg6.nxv1i32.nxv1i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i32: +define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i32: +define void @test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i16: +define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, 
v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i16: +define void @test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i8: +define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i8: +define void @test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, 
v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i64: +define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i64: +define void @test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32(,,,,,,, ptr, , , i64) +declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i32: +define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i32: +define void @test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i16: +define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli 
zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i16: +define void @test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i8: +define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i8: +define void @test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i64: +define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i64: +define void @test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void 
@llvm.riscv.vsoxseg8.nxv1i32.nxv1i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i32: +define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i32: +define void @test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i16: +define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i16: +define void @test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i8: +define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, 
i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i8: +define void @test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv8i16_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i16: +define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i16: +define void @test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8(,, ptr, , , i64) 
+declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv8i16_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i8: +define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i8: +define void @test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv8i16_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i64: +define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8i16_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: 
test_vsoxseg2_mask_nxv8i16_nxv8i64: +define void @test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv8i16_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i32: +define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i32: +define void @test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv8i16_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i16: +define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i16: +define void @test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv8i16_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i8: +define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i8: +define void @test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv8i16_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i64: +define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv8i16_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i64: +define void @test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv8i16_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i32: +define void 
@test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i32: +define void @test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv8i16_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i16: +define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i16: +define void @test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv8i16_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i8: +define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i8: +define void @test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv8i16_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i64: +define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv8i16_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i64: +define void @test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv8i16_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i32: +define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv8i16_nxv8i32( %val, ptr %base, 
%index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i32: +define void @test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i32: +define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i32: +define void @test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i8: +define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i8: +define void @test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i64: +define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i64: +define void @test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, 
i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i16: +define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i16: +define void @test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i32: +define void 
@test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i32: +define void @test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i8: +define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i8: +define void @test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i64: +define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i64: +define void @test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i16: +define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i16: +define void @test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i32: +define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i32: +define void @test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v 
v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i8: +define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i8: +define void @test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i64: 
+define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i64: +define void @test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i16: +define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i16: +define void @test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i32: +define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i32: +define void @test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i8: +define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i8: +define void @test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i64: +define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i64: +define void @test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i16: +define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i16: +define void @test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 
%vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i32: +define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i32: +define void @test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i8: +define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i8: +define void @test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i64: +define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i64: +define void @test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, 
%mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i16: +define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i16: +define void @test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32(,,,,,,, 
ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i32: +define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i32: +define void @test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i8: +define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i8: +define void @test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i64: +define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i64: +define void @test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i16: +define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i16: +define void @test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, ptr, , 
, i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i32: +define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i32: +define void @test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i8: +define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: 
vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i8: +define void @test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i64: +define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i64: +define void 
@test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i16: +define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i16: +define void @test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, 
%val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i64: +define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i64: +define void @test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i32: +define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i32: +define void @test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i16: +define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i16: +define void @test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } 
-declare void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i8: +define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i8: +define void @test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i64: +define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void 
@test_vsoxseg3_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i64: +define void @test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i32: +define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i32: +define void @test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16(,,, ptr, , , i64) +declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i16: +define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i16: +define void @test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i8: +define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } 
-define void @test_vsoxseg3_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i8: +define void @test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i64: +define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i64: +define void @test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32(,,,, ptr, , i64) -declare void 
@llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i32: +define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i32: +define void @test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i16: +define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16( %val, %val, 
%val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i16: +define void @test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i8: +define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i8: +define void @test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i64: +define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i64: +define void @test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i32: +define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; 
CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i32: +define void @test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i16: +define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i16: +define void @test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i8: +define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i8: +define void @test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 
2), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i64: +define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i64: +define void @test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i32: +define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i32: +define void @test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i16: +define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i16: +define void @test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i8: +define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i8: +define void @test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i64: +define void 
@test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i64: +define void @test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i32: +define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i32: +define void @test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i16: +define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i16: +define void @test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i8: +define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i8: +define void @test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv1i16_nxv1i64( 
%val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i64: +define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i64: +define void @test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i32: +define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call 
void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i32: +define void @test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i16: +define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i16: +define void @test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i8: +define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i8: +define void @test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32(,, 
ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i32: +define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i32: +define void @test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i8: +define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, 
%mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i8: +define void @test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i16: +define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i16: +define void @test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i64: +define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i64: +define void @test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i32: +define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i32: +define void @test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) 
%val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i8: +define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i8: +define void @test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void 
@test_vsoxseg3_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i16: +define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i16: +define void @test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i64: +define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i64: +define void @test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i32: +define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i32: +define void @test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i8: +define void 
@test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i8: +define void @test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i16: +define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i16: +define void @test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i64: +define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i64: +define void @test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vsoxseg5_nxv2i32_nxv2i32: +define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i32: +define void @test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i8: +define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: 
test_vsoxseg5_mask_nxv2i32_nxv2i8: +define void @test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i16: +define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i16: +define void @test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void 
@llvm.riscv.vsoxseg5.nxv2i32.nxv2i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i64: +define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i64: +define void @test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i32: +define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v 
v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i32: +define void @test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i8: +define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i8: +define void @test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: 
vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i16: +define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i16: +define void @test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i64: +define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10 +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i64: +define void @test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i32: +define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i32: +define void @test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i8: +define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i8: +define void @test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i16: +define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i16: +define void @test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i64: +define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { 
+; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i64: +define void @test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i32: +define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void 
@test_vsoxseg8_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i32: +define void @test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i8: +define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i8: +define void @test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: 
vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i16: +define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i16: +define void @test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define 
void @test_vsoxseg8_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i64: +define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i64: +define void @test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i16: +define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv8i8_nxv8i16( %val, ptr 
%base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i16: +define void @test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i8: +define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i8: +define void @test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, 
i64) -define void @test_vsoxseg2_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i64: +define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i64: +define void @test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i32: +define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i32: +define void @test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, 
a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i16: +define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i16: +define void @test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i8: +define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i8: +define void @test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i64: +define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i64: +define void @test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 +define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i32: +define void @test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i16: +define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i16: +define void @test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i8: +define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i8: +define void @test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, 
m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i64: +define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i64: +define void @test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i32: +define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i32: +define void @test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i16: +define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i16: +define void @test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i8: +define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i8: +define void @test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define 
void @test_vsoxseg5_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i64: +define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i64: +define void @test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i32: +define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: 
test_vsoxseg5_mask_nxv8i8_nxv8i32: +define void @test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i16: +define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i16: +define void @test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) 
ret void } -declare void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i8: +define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i8: +define void @test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i64: +define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, 
a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i64: +define void @test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i32: +define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg6_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i32: +define void @test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: 
vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i16: +define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i16: +define void @test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i8: +define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i8: +define void @test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i64: +define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail 
call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i64: +define void @test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i32: +define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i32: +define void @test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i16: +define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg8_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i16: +define void @test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare 
void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i8: +define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg8_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i8: +define void @test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i64: +define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg8_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i64: +define void @test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i32: +define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg8_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i32: +define void @test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i64_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i32: +define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv4i64_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i32: +define void @test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i64_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i8: +define void 
@test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv4i64_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i8: +define void @test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i64_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i64: +define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv4i64_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i64: +define void @test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64( %val, %val, ptr %base, 
%index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i64_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i16: +define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv4i64_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i16: +define void @test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i32: +define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg2.nxv4i16.nxv4i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i32: +define void @test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i8: +define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i8: +define void @test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64(,, ptr, , i64) -declare void 
@llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i64: +define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i64: +define void @test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i16: +define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; 
CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i16: +define void @test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i32: +define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg3_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i32: +define void @test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i8: +define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg3_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i8: +define void @test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i64: +define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg3_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i64: 
+define void @test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg3_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i16: +define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg3_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i16: +define void @test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i32: +define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg4_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i32: +define void @test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i8: +define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define 
void @test_vsoxseg4_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i8: +define void @test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i64: +define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg4_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i64: +define void @test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16(,,,, ptr, , , i64) +declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg4_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i16: +define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg4_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i16: +define void @test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i32: +define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) 
+ tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg5_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i32: +define void @test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i8: +define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg5_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i8: +define void @test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, 
%val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i64: +define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg5_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i64: +define void @test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg5_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i16: +define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg5_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i16: +define void @test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i32: +define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg6_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i32: +define void @test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i8: +define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg6_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i8: +define void @test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64(,,,,,, ptr, , , i64) 
+declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i64: +define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg6_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i64: +define void @test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg6_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i16: +define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, 
ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg6_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i16: +define void @test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i32: +define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i32: +define void @test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; 
CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i8: +define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i8: +define void @test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i64: +define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i64: +define void @test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg7_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i16: +define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: 
vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i16: +define void @test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i32: +define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg8_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i32: +define void @test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; 
CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i8: +define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg8_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i8: +define void @test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64(,,,,,,,, ptr, , i64) -declare void 
@llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i64: +define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg8_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i64: +define void @test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg8_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i16: +define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg8_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i16: +define void @test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i64: +define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i64: +define void @test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i32: +define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i32: +define void @test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i16: +define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i16: +define void @test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i8: +define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i8: +define void @test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void 
@llvm.riscv.vsoxseg3.nxv1i8.nxv1i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i64: +define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i64: +define void @test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i32: +define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i32: +define void @test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i16: +define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i16: +define void @test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i8: +define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg3.nxv1i8.nxv1i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i8: +define void @test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i64: +define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i64: +define void @test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32(,,,, 
ptr, , , i64) -define void @test_vsoxseg4_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i32: +define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i32: +define void @test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i16: +define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i16: +define void @test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i8: +define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i8: +define void @test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i64: +define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; 
CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i64: +define void @test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i32: +define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i32: +define void @test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, 
%val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i16: +define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i16: +define void @test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i8: +define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i8: +define void @test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i64: +define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i64: +define void @test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret 
void } -declare void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i32: +define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i32: +define void @test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i16: +define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void 
@test_vsoxseg6_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i16: +define void @test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i8: +define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i8: +define void @test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64(,,,,,,, ptr, , i64) -declare void 
@llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i64: +define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i64: +define void @test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i32: +define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 
%vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i32: +define void @test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i16: +define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i16: +define void @test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, 
%index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i8: +define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i8: +define void @test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i64: +define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail 
call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i64: +define void @test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i32: +define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i32: +define void @test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i16: +define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i16: +define void @test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i8: +define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i8: +define void @test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i32: +define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i32: +define void @test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i8: +define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i8: +define void @test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i16: +define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i16: +define void @test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i64: +define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i64: +define void @test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i32: +define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i32: +define void @test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i8: +define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i8: +define void @test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i16: +define void 
@test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i16: +define void @test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i64: +define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i64: +define void @test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; 
CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i32: +define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i32: +define void @test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i8: +define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i8: +define void @test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i16: +define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i16: +define void @test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2i8_nxv2i64( 
%val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i64: +define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i64: +define void @test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i32: +define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i32: +define void @test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i8: +define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i8: +define void @test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i16: +define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i16: +define void @test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i64: +define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i64: +define void @test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: 
ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i32: +define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i32: +define void @test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i8: +define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: 
- tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i8: +define void @test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i16: +define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i16: +define void @test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call 
void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i64: +define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i64: +define void @test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i32: +define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, 
%index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i32: +define void @test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i8: +define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i8: +define void @test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + 
tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i16: +define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i16: +define void @test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i64: +define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg7.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i64: +define void @test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i32: +define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i32: +define void @test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v 
v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i8: +define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i8: +define void @test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i16: +define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; 
CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i16: +define void @test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i64: +define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i64: +define void @test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: 
vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv8i32_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i16: +define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8i32_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i16: +define void @test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv8i32_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i8: +define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg2.nxv8i32.nxv8i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8i32_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i8: +define void @test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64(,, ptr, , , i64) -define void @test_vsoxseg2_nxv8i32_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i64: +define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8i32_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i64: +define void @test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv8i32_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i32: +define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) 
%val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8i32_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i32: +define void @test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv32i8_nxv32i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i16: +define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv32i8_nxv32i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i16: +define void @test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv32i8_nxv32i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i8: +define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv32i8_nxv32i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i8: +define void @test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i32: +define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i32: +define void @test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i8: +define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i8: +define void @test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i16: +define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16( %val, %val, ptr %base, %index, 
i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i16: +define void @test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i64: +define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i64: +define void @test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i32: +define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i32: +define void @test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i8: +define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i8: +define void @test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, 
ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i16: +define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i16: +define void @test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i64: +define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i64: 
+define void @test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vsoxseg6ei64.v v8, (a0), v14, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
   ret void
 }
-declare void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
-define void @test_vsoxseg4_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i32:
+define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
+; CHECK-NEXT:    vsoxseg6ei8.v v8, (a0), v14
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
+  tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4)
   ret void
 }
-define void @test_vsoxseg4_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i32:
+define void @test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vsoxseg6ei8.v v8, (a0), v14, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
   ret void
 }
-declare void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
-define void @test_vsoxseg4_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i8:
+define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
+; CHECK-NEXT:    vsoxseg6ei16.v v8, (a0), v14
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
+  tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4)
   ret void
 }
-define void @test_vsoxseg4_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i8:
+define void @test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vsoxseg6ei16.v v8, (a0), v14, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
   ret void
 }
-declare void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
-define void @test_vsoxseg4_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i16:
+define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
+; CHECK-NEXT:    vsoxseg6ei32.v v8, (a0), v14
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
+  tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4)
   ret void
 }
-define void @test_vsoxseg4_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i16:
+define void @test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vsoxseg6ei32.v v8, (a0), v14, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
   ret void
 }
-declare void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i64: +define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i64: +define void @test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i32: +define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i32: +define void @test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i8: +define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i8: +define void @test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i16: +define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i16: +define void @test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i64: +define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i64: +define void @test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v 
v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i32: +define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i32: +define void @test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i8: +define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: 
vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i8: +define void @test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i16: +define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i16: +define void @test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t 
; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i64: +define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i64: +define void @test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i32: +define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i32: +define void @test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i8: +define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i8: +define void @test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call 
void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i16: +define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i16: +define void @test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i64: +define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10 +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i64: +define void @test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i32: +define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i32: +define void @test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, 
i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i8: +define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i8: +define void @test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i16: +define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i16: +define void @test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i64: +define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i64: +define void @test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2i64_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i32: +define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2i64_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i32: +define void @test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2i64_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i8: +define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", 
, 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2i64_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i8: +define void @test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2i64_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i16: +define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2i64_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i16: +define void @test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2i64_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i64: +define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2i64_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i64: +define void @test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2i64_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i32: +define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2i64_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i32: +define void @test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2i64_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i8: +define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2i64_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i8: +define void @test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2i64_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i16: +define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2i64_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i16: +define void 
@test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2i64_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i64: +define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2i64_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i64: +define void @test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2i64_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i32: +define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; 
CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2i64_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i32: +define void @test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2i64_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i8: +define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2i64_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i8: +define void @test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, ptr 
%base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2i64_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i16: +define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2i64_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i16: +define void @test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2i64_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i64: +define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void 
@test_vsoxseg4_mask_nxv2i64_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i64: +define void @test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv16f16_nxv16i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i16: +define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv16f16_nxv16i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i16: +define void @test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv16f16_nxv16i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i8: +define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv16f16_nxv16i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i8: +define void @test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv16f16_nxv16i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i32: +define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv16f16_nxv16i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i32: +define void @test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, 
%index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv4f64_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i32: +define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv4f64_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i32: +define void @test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv4f64_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i8: +define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv4f64_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i8: +define void @test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; 
CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64(,, ptr, , , i64) -define void @test_vsoxseg2_nxv4f64_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i64: +define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv4f64_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i64: +define void @test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv4f64_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i16: +define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", 
, 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv4f64_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i16: +define void @test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64(,, ptr, , , i64) -define void @test_vsoxseg2_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i64: +define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i64: +define void @test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i32: +define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i32: +define void @test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i16: +define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i16: +define void @test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8(,, ptr, , i64) -declare 
void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i8: +define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i8: +define void @test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i64: +define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i64: +define void @test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i32: +define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i32: +define void @test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i16: +define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i16: +define void @test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i8: +define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i8: +define void @test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i64: +define void 
@test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i64: +define void @test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i32: +define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i32: +define void @test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i16: +define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i16: +define void @test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i8: +define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: 
- tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i8: +define void @test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i64: +define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i64: +define void @test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, 
i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i32: +define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i32: +define void @test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i16: +define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: 
test_vsoxseg5_mask_nxv1f64_nxv1i16: +define void @test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i8: +define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i8: +define void @test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i64: +define void 
@test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i64: +define void @test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i32: +define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i32: +define void @test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i16: +define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i16: +define void @test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i8: +define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i8: +define void @test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i64: +define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i64: +define void @test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i32: +define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i32: +define void @test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i16: +define void 
@test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i16: +define void @test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i8: +define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i8: +define void 
@test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i64: +define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i64: +define void @test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32(,,,,,,,, 
ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i32: +define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i32: +define void @test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i16: +define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i16: +define void @test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i8: +define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i8: +define void @test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i32: +define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i32: +define void @test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i8: +define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i8: +define void @test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 
%vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i16: +define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i16: +define void @test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i64: +define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i64( %val, %val, ptr 
%base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i64: +define void @test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i32: +define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i32: +define void @test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i8: +define void 
@test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i8: +define void @test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i16: +define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i16: +define void @test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; 
CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i64: +define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i64: +define void @test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i32: +define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr 
%base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i32: +define void @test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i8: +define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i8: +define void @test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i16: +define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i16: +define void @test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i64: +define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i64: +define void @test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, 
%val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i32: +define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i32: +define void @test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i8: +define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void 
@test_vsoxseg5_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i8: +define void @test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i16: +define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i16: +define void @test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 
%vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i64: +define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg5_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i64: +define void @test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i32: +define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i32: +define void @test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i8: +define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i8: +define void @test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i16: +define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i16: +define void @test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i64: +define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10 +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg6_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i64: +define void @test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v 
v12, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i32: +define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i32: +define void @test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i8: +define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i8: +define void @test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i16: +define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i16: +define void @test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli 
zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i64: +define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg7_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i64: +define void @test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i32: +define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; 
CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i32: +define void @test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i8: +define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i8: +define void @test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; 
CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i16: +define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i16: +define void @test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i64: +define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, 
v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg8_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i64: +define void @test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i64(,, ptr, , , i64) -define void @test_vsoxseg2_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i64: +define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i64: +define void @test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call 
void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i32: +define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i32: +define void @test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i16: +define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i16: +define void 
@test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i8: +define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg2_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i8: +define void @test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i64(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i64: +define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i64: +define void @test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i32: +define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i32: +define void @test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16(,,, 
ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i16: +define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i16: +define void @test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i8: +define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg3_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i8: +define void @test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i64(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i64: +define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsoxseg4_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i64: +define void @test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i32: +define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, 
ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg4_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i32: +define void @test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i16: +define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg4_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i16: +define void @test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, 
i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i8: +define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg4_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i8: +define void @test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i64(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i64: +define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg5_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i64: +define void 
@test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i32: +define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg5_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i32: +define void @test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i16: +define void 
@test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg5_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i16: +define void @test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i8: +define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg5_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i8: +define void @test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i64(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i64: +define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg6_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i64: +define void @test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i32: +define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg6_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i32: +define void @test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i16: +define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg6_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i16: +define void @test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; 
CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i8: +define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg6_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i8: +define void @test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i64(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i64: +define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i64: +define void @test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i32: +define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i32: +define void @test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v 
v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i16: +define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i16: +define void @test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i8: +define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i8: +define void @test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i64(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i64: +define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg8_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i64: +define void @test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i32: +define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg8_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i32: +define void @test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, ptr, , , i64) -define void 
@test_vsoxseg8_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i16: +define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg8_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i16: +define void @test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i8: +define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) 
%val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg8_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i8: +define void @test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64(,, ptr, , , i64) -define void @test_vsoxseg2_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i64: +define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i64: +define void @test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vsoxseg2_nxv1f32_nxv1i32: +define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i32: +define void @test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i16: +define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i16: +define void @test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i8: +define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg2_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i8: +define void @test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i64: +define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg3_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i64: +define void 
@test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i32: +define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg3_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i32: +define void @test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i16: +define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; 
CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg3_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i16: +define void @test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i8: +define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg3_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i8: +define void @test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i64: +define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg4_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i64: +define void @test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i32: +define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg4_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, 
i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i32: +define void @test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i16: +define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg4_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i16: +define void @test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i8: +define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, 
i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg4_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i8: +define void @test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i64: +define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg5_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i64: +define void @test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; 
CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i32: +define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg5_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i32: +define void @test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i16: +define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; 
CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg5_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i16: +define void @test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i8: +define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg5_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i8: +define void @test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i64: +define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg6_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i64: +define void @test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i32: +define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg6_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i32: +define void @test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i16: +define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg6_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i16: +define void @test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, 
ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i8: +define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg6_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i8: +define void @test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i64: +define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i64: +define void @test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i32: +define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i32: +define void @test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i16: +define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i16: +define void @test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i8: +define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg7.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg7_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i8: +define void @test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i64: +define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsoxseg8_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i64: +define void @test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, 
(a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i32: +define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i32: +define void @test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i16: +define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: 
vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i16: +define void @test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i8: +define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i8: +define void @test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; 
CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv8f16_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i16: +define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i16: +define void @test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv8f16_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i8: +define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - 
tail call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i8: +define void @test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i64(,, ptr, , , i64) -define void @test_vsoxseg2_nxv8f16_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i64: +define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8f16_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i64: +define void @test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv8f16_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i32: +define void 
@test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i32: +define void @test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv8f16_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i16: +define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i16: +define void @test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv8f16_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i8: +define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i8: +define void @test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i64(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv8f16_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i64: +define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv8f16_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: 
test_vsoxseg3_mask_nxv8f16_nxv8i64: +define void @test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv8f16_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i32: +define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i32: +define void @test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv8f16_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i16: +define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; 
CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i16: +define void @test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv8f16_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i8: +define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i8: +define void @test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i64(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv8f16_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i64: +define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv8f16_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i64: +define void @test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv8f16_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i32: +define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12 +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i32: +define void @test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv8f32_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i16: +define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8f32_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i16: +define void @test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv8f32_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i8: +define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg2.nxv8f32.nxv8i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8f32_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i8: +define void @test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64(,, ptr, , , i64) -define void @test_vsoxseg2_nxv8f32_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i64: +define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8f32_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i64: +define void @test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv8f32_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i32: +define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv8f32_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i32: +define void @test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2f64_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i32: +define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i32: +define void @test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, 
%index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2f64_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i8: +define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i8: +define void @test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2f64_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i16: +define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i16: +define void @test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2f64_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i64: +define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2f64_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i64: +define void @test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2f64_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i32: +define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i32: +define void @test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2f64_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i8: +define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i8: +define void @test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2f64_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i16: +define void 
@test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i16: +define void @test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2f64_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i64: +define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2f64_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i64: +define void @test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, 
a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2f64_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i32: +define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i32: +define void @test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2f64_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i8: +define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i8: +define void @test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2f64_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i16: +define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i16: +define void @test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64(,,,, ptr, , , i64) -define void 
@test_vsoxseg4_nxv2f64_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i64: +define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2f64_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i64: +define void @test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i32: +define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i32: +define void @test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: 
vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i8: +define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i8: +define void @test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i64(,, ptr, , , i64) -define void @test_vsoxseg2_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i64: +define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv4f16_nxv4i64( %val, ptr 
%base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i64: +define void @test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i16: +define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i16: +define void @test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i32: +define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i32: +define void @test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i8: +define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i8: +define void @test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, 
%mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i64(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 +define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i64: +define void @test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i16: +define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i16: +define void @test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i32: +define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i32: +define void @test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i8: +define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, 
a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i8: +define void @test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i64(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i64: +define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i64: +define void @test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i16: +define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i16: +define void @test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i32: +define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { 
-; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i32: +define void @test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i8: +define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i8: +define void @test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i64: +define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i64: +define void @test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i16: +define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i16: +define void @test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 
; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i32: +define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i32: +define void @test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i8: +define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, 
(a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i8: +define void @test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i64(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i64: +define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i64: +define void @test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i64( %val, %val, 
%val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i16: +define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i16: +define void @test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i32: +define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg7.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i32: +define void @test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i8: +define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i8: +define void @test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i64(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i64: +define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i64: +define void @test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i16: +define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i16: +define void @test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i32: +define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i32: +define void @test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, 
m1, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i8: +define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i8: +define void @test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i64(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i64: +define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; 
CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i64: +define void @test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i16: +define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i16: +define void @test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i32: +define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i32: +define void @test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i8: +define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: 
vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i8: +define void @test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i16: +define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i16: +define void @test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i64(,, ptr, , , i64) 
-define void @test_vsoxseg2_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i64: +define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i64: +define void @test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i32: +define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i32: +define void @test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, 
e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i8: +define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i8: +define void @test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i16: +define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 
%vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i16: +define void @test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i64: +define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i64: +define void @test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i32: +define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i32: +define void @test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i8: +define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i8: +define void @test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, ptr %base, 
%index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i16: +define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i16: +define void @test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i64(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i64: +define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i64: +define void 
@test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vmv1r.v v14, v8
-; CHECK-NEXT: vmv1r.v v15, v8
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl)
+ tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4)
 ret void
 }
-declare void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32(,,,,, ptr, , i64)
-declare void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32(,,,,, ptr, , , i64)
-define void @test_vsoxseg5_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i32:
+define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vmv1r.v v14, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl)
+ tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4)
 ret void
 }
-define void @test_vsoxseg5_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) {
-; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i32:
+define void @test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vmv1r.v v14, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl)
+ tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4)
 ret void
 }
-declare void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8(,,,,, ptr, , i64)
-declare void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8(,,,,, ptr, , , i64)
-define void @test_vsoxseg5_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i8:
+define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) {
+; CHECK-LABEL:
test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i8: +define void @test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i16: +define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i16: +define void @test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i64(,,,,, ptr, , , i64) -define void @test_vsoxseg5_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i64: +define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg5_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i64: +define void @test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i32: +define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 
-; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i32: +define void @test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i8: +define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i8: +define void @test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, 
(a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i16: +define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i16: +define void @test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i64(,,,,,, ptr, , , i64) -define void @test_vsoxseg6_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i64: +define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; 
CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg6_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i64: +define void @test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i32: +define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i32: +define void @test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, 
%val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i8: +define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i8: +define void @test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i16: +define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr 
%base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i16: +define void @test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i64(,,,,,,, ptr, , , i64) -define void @test_vsoxseg7_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i64: +define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10 +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg7_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i64: +define void @test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i32: +define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i32: +define void @test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i8: +define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, 
ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i8: +define void @test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i16: +define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i16: +define void @test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, 
mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i64(,,,,,,,, ptr, , , i64) -define void @test_vsoxseg8_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i64: +define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg8_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i64: +define void @test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32(,, ptr, , , i64) -define void @test_vsoxseg2_nxv4f32_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i32: +define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; 
CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i32: +define void @test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8(,, ptr, , , i64) -define void @test_vsoxseg2_nxv4f32_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i8: +define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i8: +define void @test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void 
@llvm.riscv.vsoxseg2.nxv4f32.nxv4i64(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64(,, ptr, , , i64) -define void @test_vsoxseg2_nxv4f32_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i64: +define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv4f32_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i64: +define void @test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16(,, ptr, , , i64) -define void @test_vsoxseg2_nxv4f32_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i16: +define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i16: +define void @test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: 
vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv4f32_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i32: +define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i32: +define void @test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv4f32_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i8: +define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i8: +define void @test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv4f32_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i64: +define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv4f32_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i64: +define void @test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16(,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16(,,, ptr, , , i64) -define void @test_vsoxseg3_nxv4f32_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i16: +define 
void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg3_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i16: +define void @test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv4f32_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i32: +define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i32: +define void @test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: 
vsoxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv4f32_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i8: +define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i8: +define void @test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv4f32_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i64: +define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i64( %val, 
%val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv4f32_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i64: +define void @test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16(,,,, ptr, , , i64) -define void @test_vsoxseg4_nxv4f32_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i16: +define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsoxseg4_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i16: +define void @test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } + diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll 
index e481c26063c95..330ec59d3459c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll @@ -1,4152 +1,3917 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh \ +; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsseg2.nxv16i16(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv16i16(,, ptr, , i32) +declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -define void @test_vsseg2_nxv16i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv16i16: +define void @test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv16i16( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg2_mask_nxv16i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv16i16: +define void @test_vsseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv16i16( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg2.nxv1i8(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv1i8(,, ptr, , i32) +declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -define void @test_vsseg2_nxv1i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv1i8: +define void @test_vsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv1i8( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg2_mask_nxv1i8( %val, ptr %base, %mask, i32 %vl) { -; 
CHECK-LABEL: test_vsseg2_mask_nxv1i8: +define void @test_vsseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv1i8( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg3.nxv1i8(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv1i8(,,, ptr, , i32) +declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -define void @test_vsseg3_nxv1i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv1i8: +define void @test_vsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv1i8( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg3_mask_nxv1i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv1i8: +define void @test_vsseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv1i8( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg4.nxv1i8(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv1i8(,,,, ptr, , i32) +declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -define void @test_vsseg4_nxv1i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv1i8: +define void @test_vsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, 
ma -; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv1i8( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg4_mask_nxv1i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv1i8: +define void @test_vsseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv1i8( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg5.nxv1i8(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv1i8(,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -define void @test_vsseg5_nxv1i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv1i8: +define void @test_vsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv1i8( %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg5_mask_nxv1i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv1i8: +define void @test_vsseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg6.nxv1i8(,,,,,, ptr , i32) -declare void 
@llvm.riscv.vsseg6.mask.nxv1i8(,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -define void @test_vsseg6_nxv1i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg6_nxv1i8: +define void @test_vsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg6_mask_nxv1i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv1i8: +define void @test_vsseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg7.nxv1i8(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv1i8(,,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -define void @test_vsseg7_nxv1i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv1i8: +define void @test_vsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg7_mask_nxv1i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vsseg7_mask_nxv1i8: +define void @test_vsseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg8.nxv1i8(,,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg8.mask.nxv1i8(,,,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -define void @test_vsseg8_nxv1i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv1i8: +define void @test_vsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg8_mask_nxv1i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv1i8: +define void @test_vsseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg2.nxv16i8(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv16i8(,, ptr, , i32) +declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) +declare void 
@llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -define void @test_vsseg2_nxv16i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv16i8: +define void @test_vsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv16i8( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg2_mask_nxv16i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv16i8: +define void @test_vsseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv16i8( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg3.nxv16i8(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv16i8(,,, ptr, , i32) +declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -define void @test_vsseg3_nxv16i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv16i8: +define void @test_vsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 3) + ret void +} + +define void @test_vsseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 3) + ret void +} + +declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) + +define void @test_vsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; 
CHECK-LABEL: test_vsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv16i8( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg3_mask_nxv16i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv16i8: +define void @test_vsseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv16i8( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 3) + ret void +} + +declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) + +define void @test_vsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 3) + ret void +} + +define void @test_vsseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 3) + ret void +} + +declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) + +define void @test_vsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 3) + ret void +} + +define void @test_vsseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: 
vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 3) + ret void +} + +declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) + +define void @test_vsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 3) + ret void +} + +define void @test_vsseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 3) + ret void +} + +declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) + +define void @test_vsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 3) + ret void +} + +define void @test_vsseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg4.nxv16i8(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv16i8(,,,, ptr, , i32) +declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -define void @test_vsseg4_nxv16i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv16i8: +define void @test_vsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, 
(a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv16i8( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg4_mask_nxv16i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv16i8: +define void @test_vsseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv16i8( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg2.nxv2i32(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv2i32(,, ptr, , i32) +declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -define void @test_vsseg2_nxv2i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv2i32: +define void @test_vsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv2i32( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg2_mask_nxv2i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv2i32: +define void @test_vsseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv2i32( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg3.nxv2i32(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv2i32(,,, ptr, , i32) +declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -define void @test_vsseg3_nxv2i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv2i32: +define void 
@test_vsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv2i32( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg3_mask_nxv2i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv2i32: +define void @test_vsseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv2i32( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg4.nxv2i32(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv2i32(,,,, ptr, , i32) +declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -define void @test_vsseg4_nxv2i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv2i32: +define void @test_vsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv2i32( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg4_mask_nxv2i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv2i32: +define void @test_vsseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv2i32( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg5.nxv2i32(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv2i32(,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -define void @test_vsseg5_nxv2i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv2i32: +define void @test_vsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv2i32( %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg5_mask_nxv2i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv2i32: +define void @test_vsseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg6.nxv2i32(,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg6.mask.nxv2i32(,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -define void @test_vsseg6_nxv2i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg6_nxv2i32: +define void @test_vsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void 
@llvm.riscv.vsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg6_mask_nxv2i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv2i32: +define void @test_vsseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg7.nxv2i32(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv2i32(,,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -define void @test_vsseg7_nxv2i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv2i32: +define void @test_vsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg7_mask_nxv2i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv2i32: +define void @test_vsseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg8.nxv2i32(,,,,,,,, ptr , i32) -declare void 
@llvm.riscv.vsseg8.mask.nxv2i32(,,,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -define void @test_vsseg8_nxv2i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv2i32: +define void @test_vsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg8_mask_nxv2i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv2i32: +define void @test_vsseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg2.nxv4i16(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv4i16(,, ptr, , i32) +declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -define void @test_vsseg2_nxv4i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv4i16: +define void @test_vsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv4i16( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg2_mask_nxv4i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vsseg2_mask_nxv4i16: +define void @test_vsseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv4i16( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg3.nxv4i16(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv4i16(,,, ptr, , i32) +declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -define void @test_vsseg3_nxv4i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv4i16: +define void @test_vsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv4i16( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg3_mask_nxv4i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv4i16: +define void @test_vsseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv4i16( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg4.nxv4i16(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv4i16(,,,, ptr, , i32) +declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -define void @test_vsseg4_nxv4i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv4i16: +define void @test_vsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv4i16( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg4_mask_nxv4i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv4i16: +define void @test_vsseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv4i16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsseg5.nxv4i16(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv4i16(,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -define void @test_vsseg5_nxv4i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv4i16: +define void @test_vsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv4i16( %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg5_mask_nxv4i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv4i16: +define void @test_vsseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 3) ret void } -declare void 
@llvm.riscv.vsseg6.nxv4i16(,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg6.mask.nxv4i16(,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -define void @test_vsseg6_nxv4i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg6_nxv4i16: +define void @test_vsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 3) ret void } -define void @test_vsseg6_mask_nxv4i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv4i16: +define void @test_vsseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 3) + ret void +} + +declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) + +define void @test_vsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 3) + ret void +} + +define void @test_vsseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 3) + ret void +} + +declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) +declare void 
@llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i1>, i32, i32)
+
+define void @test_vsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsseg8e8.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl, i32 3)
+ ret void
+}
+
+define void @test_vsseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vsseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
+ ret void
+}
+
+declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i32, i32)
+declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i1>, i32, i32)
+
+define void @test_vsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsseg8e8.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, i32 3)
+ ret void
+}
+
+define void @test_vsseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vsseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
+ ret void
+}
+
+declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, i32, i32)
+declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i1>, i32, i32)
+
+define void @test_vsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsseg8e8.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, i32 3)
+ ret void
+}
+
+define void @test_vsseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vsseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr
%base, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
+ ret void
+}
+
+declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i1>, i32, i32)
+
+define void @test_vsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsseg2e16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl, i32 4)
+ ret void
+}
+
+define void @test_vsseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vsseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
+ ret void
+}
+
+declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i1>, i32, i32)
+
+define void @test_vsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsseg2e16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, i32 4)
+ ret void
+}
+
+define void @test_vsseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vsseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
+ ret void
+}
+
+declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i1>, i32, i32)
+
+define void @test_vsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsseg2e16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, i32 4)
+ ret void
+}
+
+define void @test_vsseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vsseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
+ ret void
+}
+
+declare void
@llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) + +define void @test_vsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) + +define void @test_vsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) + +define void @test_vsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) + +define void 
@test_vsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) + +define void @test_vsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) + +define void @test_vsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) + +define void @test_vsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: 
test_vsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) + +define void @test_vsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) + +define void @test_vsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) + +define void @test_vsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: 
vsseg4e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) + +define void @test_vsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) + +define void @test_vsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) + +define void @test_vsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) + +define void @test_vsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) + +define void @test_vsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) + +define void @test_vsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void 
@test_vsseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) + +define void @test_vsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) + +define void @test_vsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) + +define void @test_vsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) + +define void @test_vsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) + +define void @test_vsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) + +define void @test_vsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 4) + ret void +} + +define void @test_vsseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; 
CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) + +define void @test_vsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 5) + ret void +} + +define void @test_vsseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg7.nxv4i16(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv4i16(,,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -define void @test_vsseg7_nxv4i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv4i16: +define void @test_vsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg7_mask_nxv4i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv4i16: +define void @test_vsseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail 
call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg8.nxv4i16(,,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg8.mask.nxv4i16(,,,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -define void @test_vsseg8_nxv4i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv4i16: +define void @test_vsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg8_mask_nxv4i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv4i16: +define void @test_vsseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg2.nxv1i32(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv1i32(,, ptr, , i32) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -define void @test_vsseg2_nxv1i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv1i32: +define void @test_vsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv1i32( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg2_mask_nxv1i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vsseg2_mask_nxv1i32: +define void @test_vsseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv1i32( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg3.nxv1i32(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv1i32(,,, ptr, , i32) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -define void @test_vsseg3_nxv1i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv1i32: +define void @test_vsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv1i32( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg3_mask_nxv1i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv1i32: +define void @test_vsseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv1i32( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg4.nxv1i32(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv1i32(,,,, ptr, , i32) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -define void @test_vsseg4_nxv1i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv1i32: +define void @test_vsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv1i32( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 5) ret void } 
-define void @test_vsseg4_mask_nxv1i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv1i32: +define void @test_vsseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv1i32( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg5.nxv1i32(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv1i32(,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -define void @test_vsseg5_nxv1i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv1i32: +define void @test_vsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv1i32( %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg5_mask_nxv1i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv1i32: +define void @test_vsseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg6.nxv1i32(,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg6.mask.nxv1i32(,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -define void @test_vsseg6_nxv1i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg6_nxv1i32: +define void @test_vsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg6_mask_nxv1i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv1i32: +define void @test_vsseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg7.nxv1i32(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv1i32(,,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -define void @test_vsseg7_nxv1i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv1i32: +define void @test_vsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg7_mask_nxv1i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv1i32: +define void @test_vsseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv1i32( %val, %val, %val, %val, %val, %val, 
%val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg8.nxv1i32(,,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg8.mask.nxv1i32(,,,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -define void @test_vsseg8_nxv1i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv1i32: +define void @test_vsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg8_mask_nxv1i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv1i32: +define void @test_vsseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg2.nxv8i16(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv8i16(,, ptr, , i32) +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -define void @test_vsseg2_nxv8i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv8i16: +define void @test_vsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv8i16( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 5) ret void } -define void 
@test_vsseg2_mask_nxv8i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv8i16: +define void @test_vsseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv8i16( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg3.nxv8i16(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv8i16(,,, ptr, , i32) +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -define void @test_vsseg3_nxv8i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv8i16: +define void @test_vsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv8i16( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg3_mask_nxv8i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv8i16: +define void @test_vsseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv8i16( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg4.nxv8i16(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv8i16(,,,, ptr, , i32) +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -define void @test_vsseg4_nxv8i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv8i16: +define void @test_vsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, 
ma +; CHECK-NEXT: vsseg6e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv8i16( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg4_mask_nxv8i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv8i16: +define void @test_vsseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv8i16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg2.nxv8i8(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv8i8(,, ptr, , i32) +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -define void @test_vsseg2_nxv8i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv8i8: +define void @test_vsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg6e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv8i8( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg2_mask_nxv8i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv8i8: +define void @test_vsseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv8i8( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg3.nxv8i8(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv8i8(,,, ptr, , i32) +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -define void @test_vsseg3_nxv8i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv8i8: +define void @test_vsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: 
test_vsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv8i8( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg3_mask_nxv8i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv8i8: +define void @test_vsseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv8i8( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg4.nxv8i8(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv8i8(,,,, ptr, , i32) +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -define void @test_vsseg4_nxv8i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv8i8: +define void @test_vsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv8i8( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg4_mask_nxv8i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv8i8: +define void @test_vsseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv8i8( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg5.nxv8i8(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv8i8(,,,,, ptr, , i32) 
+declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -define void @test_vsseg5_nxv8i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv8i8: +define void @test_vsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv8i8( %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg5_mask_nxv8i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv8i8: +define void @test_vsseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg6.nxv8i8(,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg6.mask.nxv8i8(,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -define void @test_vsseg6_nxv8i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg6_nxv8i8: +define void @test_vsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg6_mask_nxv8i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv8i8: +define void @test_vsseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; 
CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg7.nxv8i8(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv8i8(,,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -define void @test_vsseg7_nxv8i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv8i8: +define void @test_vsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg7_mask_nxv8i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv8i8: +define void @test_vsseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg8.nxv8i8(,,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg8.mask.nxv8i8(,,,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -define void @test_vsseg8_nxv8i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv8i8: +define void @test_vsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg8_mask_nxv8i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv8i8: +define void @test_vsseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg2.nxv8i32(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv8i32(,, ptr, , i32) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -define void @test_vsseg2_nxv8i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv8i32: +define void @test_vsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv8i32( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg2_mask_nxv8i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv8i32: +define void @test_vsseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv8i32( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg2.nxv4i8(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv4i8(,, ptr, , i32) +declare void 
@llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -define void @test_vsseg2_nxv4i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv4i8: +define void @test_vsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv4i8( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg2_mask_nxv4i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv4i8: +define void @test_vsseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv4i8( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg3.nxv4i8(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv4i8(,,, ptr, , i32) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -define void @test_vsseg3_nxv4i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv4i8: +define void @test_vsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv4i8( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg3_mask_nxv4i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv4i8: +define void @test_vsseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv4i8( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg4.nxv4i8(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv4i8(,,,, ptr, , i32) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -define void @test_vsseg4_nxv4i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv4i8: +define void @test_vsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv4i8( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg4_mask_nxv4i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv4i8: +define void @test_vsseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv4i8( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg5.nxv4i8(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv4i8(,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -define void @test_vsseg5_nxv4i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv4i8: +define void @test_vsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv4i8( %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg5_mask_nxv4i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv4i8: +define void @test_vsseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg6.nxv4i8(,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg6.mask.nxv4i8(,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -define void @test_vsseg6_nxv4i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg6_nxv4i8: +define void @test_vsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg5e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg6_mask_nxv4i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv4i8: +define void @test_vsseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg7.nxv4i8(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv4i8(,,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -define void @test_vsseg7_nxv4i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv4i8: +define void @test_vsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; 
CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg6e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg7_mask_nxv4i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv4i8: +define void @test_vsseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg8.nxv4i8(,,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg8.mask.nxv4i8(,,,,,,,, ptr, , i32) +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -define void @test_vsseg8_nxv4i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv4i8: +define void @test_vsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg7e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg8_mask_nxv4i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv4i8: +define void @test_vsseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsseg8.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg2.nxv1i16(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv1i16(,, ptr, , i32) +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -define void @test_vsseg2_nxv1i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv1i16: +define void @test_vsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg8e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv1i16( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg2_mask_nxv1i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv1i16: +define void @test_vsseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv1i16( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg3.nxv1i16(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv1i16(,,, ptr, , i32) -define void @test_vsseg3_nxv1i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv1i16: +define void @test_vsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv1i16( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg3_mask_nxv1i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv1i16: +define void @test_vsseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv1i16( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg4.nxv1i16(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv1i16(,,,, ptr, , i32) -define void @test_vsseg4_nxv1i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv1i16: +define void @test_vsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv1i16( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg4_mask_nxv1i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv1i16: +define void @test_vsseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv1i16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg5.nxv1i16(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv1i16(,,,,, ptr, , i32) -define void @test_vsseg5_nxv1i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv1i16: +define void @test_vsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv1i16( %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg5_mask_nxv1i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv1i16: +define void @test_vsseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg6.nxv1i16(,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg6.mask.nxv1i16(,,,,,, ptr, , i32) -define void @test_vsseg6_nxv1i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg6_nxv1i16: +define void @test_vsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg6_mask_nxv1i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv1i16: +define void @test_vsseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg7.nxv1i16(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv1i16(,,,,,,, ptr, , i32) -define void @test_vsseg7_nxv1i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv1i16: +define void @test_vsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call 
void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg7_mask_nxv1i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv1i16: +define void @test_vsseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg8.nxv1i16(,,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg8.mask.nxv1i16(,,,,,,,, ptr, , i32) -define void @test_vsseg8_nxv1i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv1i16: +define void @test_vsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg8_mask_nxv1i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv1i16: +define void @test_vsseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg2.nxv32i8(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv32i8(,, ptr, , i32) -define void @test_vsseg2_nxv32i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv32i8: +define void 
@test_vsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv32i8( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg2_mask_nxv32i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv32i8: +define void @test_vsseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv32i8( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg2.nxv2i8(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv2i8(,, ptr, , i32) -define void @test_vsseg2_nxv2i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv2i8: +define void @test_vsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv2i8( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg2_mask_nxv2i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv2i8: +define void @test_vsseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv2i8( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg3.nxv2i8(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv2i8(,,, ptr, , i32) -define void @test_vsseg3_nxv2i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv2i8: +define void @test_vsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; 
CHECK-LABEL: test_vsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv2i8( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg3_mask_nxv2i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv2i8: +define void @test_vsseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv2i8( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg4.nxv2i8(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv2i8(,,,, ptr, , i32) -define void @test_vsseg4_nxv2i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv2i8: +define void @test_vsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv2i8( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg4_mask_nxv2i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv2i8: +define void @test_vsseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv2i8( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg5.nxv2i8(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv2i8(,,,,, ptr, , i32) -define void @test_vsseg5_nxv2i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv2i8: +define void 
@test_vsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv2i8( %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg5_mask_nxv2i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv2i8: +define void @test_vsseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg6.nxv2i8(,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg6.mask.nxv2i8(,,,,,, ptr, , i32) -define void @test_vsseg6_nxv2i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg6_nxv2i8: +define void @test_vsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg6_mask_nxv2i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv2i8: +define void @test_vsseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, 
%mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg7.nxv2i8(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv2i8(,,,,,,, ptr, , i32) -define void @test_vsseg7_nxv2i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv2i8: +define void @test_vsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg7_mask_nxv2i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv2i8: +define void @test_vsseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg8.nxv2i8(,,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg8.mask.nxv2i8(,,,,,,,, ptr, , i32) -define void @test_vsseg8_nxv2i8( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv2i8: +define void @test_vsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg8_mask_nxv2i8( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv2i8: +define void 
@test_vsseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg2.nxv2i16(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv2i16(,, ptr, , i32) -define void @test_vsseg2_nxv2i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv2i16: +define void @test_vsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv2i16( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg2_mask_nxv2i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv2i16: +define void @test_vsseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv2i16( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg3.nxv2i16(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv2i16(,,, ptr, , i32) -define void @test_vsseg3_nxv2i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv2i16: +define void @test_vsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv2i16( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg3_mask_nxv2i16( %val, ptr %base, %mask, i32 
%vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv2i16: +define void @test_vsseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv2i16( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg4.nxv2i16(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv2i16(,,,, ptr, , i32) -define void @test_vsseg4_nxv2i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv2i16: +define void @test_vsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv2i16( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg4_mask_nxv2i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv2i16: +define void @test_vsseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv2i16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg5.nxv2i16(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv2i16(,,,,, ptr, , i32) -define void @test_vsseg5_nxv2i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv2i16: +define void @test_vsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv2i16( %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void 
@llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg5_mask_nxv2i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv2i16: +define void @test_vsseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg6.nxv2i16(,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg6.mask.nxv2i16(,,,,,, ptr, , i32) -define void @test_vsseg6_nxv2i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg6_nxv2i16: +define void @test_vsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg6_mask_nxv2i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv2i16: +define void @test_vsseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg7.nxv2i16(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv2i16(,,,,,,, ptr, , i32) -define void @test_vsseg7_nxv2i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv2i16: +define void @test_vsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg7_mask_nxv2i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv2i16: +define void @test_vsseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg8.nxv2i16(,,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg8.mask.nxv2i16(,,,,,,,, ptr, , i32) -define void @test_vsseg8_nxv2i16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv2i16: +define void @test_vsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg8_mask_nxv2i16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv2i16: +define void @test_vsseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void 
@llvm.riscv.vsseg2.nxv4i32(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv4i32(,, ptr, , i32) -define void @test_vsseg2_nxv4i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv4i32: +define void @test_vsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv4i32( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg2_mask_nxv4i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv4i32: +define void @test_vsseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv4i32( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg3.nxv4i32(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv4i32(,,, ptr, , i32) -define void @test_vsseg3_nxv4i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv4i32: +define void @test_vsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv4i32( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg3_mask_nxv4i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv4i32: +define void @test_vsseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv4i32( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void 
@llvm.riscv.vsseg4.nxv4i32(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv4i32(,,,, ptr, , i32) -define void @test_vsseg4_nxv4i32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv4i32: +define void @test_vsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv4i32( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg4_mask_nxv4i32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv4i32: +define void @test_vsseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv4i32( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg2.nxv16f16(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv16f16(,, ptr, , i32) -define void @test_vsseg2_nxv16f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv16f16: +define void @test_vsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv16f16( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg2_mask_nxv16f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv16f16: +define void @test_vsseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv16f16( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) 
%val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg2.nxv4f64(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv4f64(,, ptr, , i32) -define void @test_vsseg2_nxv4f64( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv4f64: +define void @test_vsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv4f64( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg2_mask_nxv4f64( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv4f64: +define void @test_vsseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv4f64( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg2.nxv1f64(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv1f64(,, ptr, , i32) -define void @test_vsseg2_nxv1f64( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv1f64: +define void @test_vsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv1f64( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg2_mask_nxv1f64( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv1f64: +define void @test_vsseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv1f64( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg3.nxv1f64(,,, ptr 
, i32) -declare void @llvm.riscv.vsseg3.mask.nxv1f64(,,, ptr, , i32) -define void @test_vsseg3_nxv1f64( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv1f64: +define void @test_vsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg3e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv1f64( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg3_mask_nxv1f64( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv1f64: +define void @test_vsseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv1f64( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg4.nxv1f64(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv1f64(,,,, ptr, , i32) -define void @test_vsseg4_nxv1f64( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv1f64: +define void @test_vsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg4e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv1f64( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg4_mask_nxv1f64( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv1f64: +define void @test_vsseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv1f64( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg5.nxv1f64(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv1f64(,,,,, ptr, , i32) -define void @test_vsseg5_nxv1f64( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv1f64: +define void @test_vsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg5e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv1f64( %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg5_mask_nxv1f64( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv1f64: +define void @test_vsseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv1f64( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg6.nxv1f64(,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg6.mask.nxv1f64(,,,,,, ptr, , i32) -define void @test_vsseg6_nxv1f64( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg6_nxv1f64: +define void @test_vsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg6e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv1f64( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg6_mask_nxv1f64( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv1f64: +define void @test_vsseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 
-; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv1f64( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg7.nxv1f64(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv1f64(,,,,,,, ptr, , i32) -define void @test_vsseg7_nxv1f64( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv1f64: +define void @test_vsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg7e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv1f64( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg7_mask_nxv1f64( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv1f64: +define void @test_vsseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg8.nxv1f64(,,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg8.mask.nxv1f64(,,,,,,,, ptr, , i32) -define void @test_vsseg8_nxv1f64( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv1f64: +define void @test_vsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg8e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail 
call void @llvm.riscv.vsseg8.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg8_mask_nxv1f64( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv1f64: +define void @test_vsseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg2.nxv2f32(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv2f32(,, ptr, , i32) -define void @test_vsseg2_nxv2f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv2f32: +define void @test_vsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv2f32( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg2_mask_nxv2f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv2f32: +define void @test_vsseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv2f32( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg3.nxv2f32(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv2f32(,,, ptr, , i32) -define void @test_vsseg3_nxv2f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv2f32: +define void @test_vsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: vsetvli 
zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv2f32( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg3_mask_nxv2f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv2f32: +define void @test_vsseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv2f32( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg4.nxv2f32(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv2f32(,,,, ptr, , i32) -define void @test_vsseg4_nxv2f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv2f32: +define void @test_vsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv2f32( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg4_mask_nxv2f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv2f32: +define void @test_vsseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv2f32( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg5.nxv2f32(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv2f32(,,,,, ptr, , i32) -define void @test_vsseg5_nxv2f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv2f32: +define void @test_vsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; 
CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vsseg5e32.v v8, (a0)
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsseg5.nxv2f32( %val, %val, %val, %val, %val, ptr %base, i32 %vl)
+ tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 5)
 ret void
 }
-define void @test_vsseg5_mask_nxv2f32( %val, ptr %base, %mask, i32 %vl) {
-; CHECK-LABEL: test_vsseg5_mask_nxv2f32:
+define void @test_vsseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) {
+; CHECK-LABEL: test_vsseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsseg5.mask.nxv2f32( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl)
+ tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 5)
+ ret void
+}
+
+
+define void @test_vsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsseg6e32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 5)
+ ret void
+}
+
+define void @test_vsseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) {
+; CHECK-LABEL: test_vsseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 5)
 ret void
 }
-declare void @llvm.riscv.vsseg6.nxv2f32(,,,,,, ptr , i32)
-declare void @llvm.riscv.vsseg6.mask.nxv2f32(,,,,,, ptr, , i32)
-define void @test_vsseg6_nxv2f32( %val, ptr %base, i32 %vl) {
-; CHECK-LABEL: test_vsseg6_nxv2f32:
+define void @test_vsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) {
+; CHECK-LABEL: test_vsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vsseg6e32.v v8, (a0)
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsseg6.nxv2f32( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl)
+ tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 5)
 ret void
 }
-define void @test_vsseg6_mask_nxv2f32( %val, ptr %base, %mask, i32 %vl) {
-; CHECK-LABEL: test_vsseg6_mask_nxv2f32:
+define void @test_vsseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) {
+; CHECK-LABEL:
test_vsseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv2f32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg7.nxv2f32(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv2f32(,,,,,,, ptr, , i32) -define void @test_vsseg7_nxv2f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv2f32: +define void @test_vsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv2f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg7_mask_nxv2f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv2f32: +define void @test_vsseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg8.nxv2f32(,,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg8.mask.nxv2f32(,,,,,,,, ptr, , i32) -define void @test_vsseg8_nxv2f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv2f32: +define void @test_vsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv2f32( %val, %val, %val, %val, %val, %val, %val, 
%val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg8_mask_nxv2f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv2f32: +define void @test_vsseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg2.nxv1f16(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv1f16(,, ptr, , i32) -define void @test_vsseg2_nxv1f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv1f16: +define void @test_vsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv1f16( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg2_mask_nxv1f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv1f16: +define void @test_vsseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv1f16( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg3.nxv1f16(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv1f16(,,, ptr, , i32) -define void @test_vsseg3_nxv1f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv1f16: +define void @test_vsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; 
CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv1f16( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 5) ret void } -define void @test_vsseg3_mask_nxv1f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv1f16: +define void @test_vsseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv1f16( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsseg4.nxv1f16(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv1f16(,,,, ptr, , i32) -define void @test_vsseg4_nxv1f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv1f16: +define void @test_vsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv1f16( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg4_mask_nxv1f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +define void @test_vsseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv1f16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg5.nxv1f16(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv1f16(,,,,, ptr, , i32) -define void @test_vsseg5_nxv1f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv1f16: +define void @test_vsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv1f16( %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg5_mask_nxv1f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv1f16: +define void @test_vsseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv1f16( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg6.nxv1f16(,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg6.mask.nxv1f16(,,,,,, ptr, , i32) -define void @test_vsseg6_nxv1f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg6_nxv1f16: +define void @test_vsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv1f16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg6_mask_nxv1f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv1f16: +define void @test_vsseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv1f16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void 
@llvm.riscv.vsseg7.nxv1f16(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv1f16(,,,,,,, ptr, , i32) -define void @test_vsseg7_nxv1f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv1f16: +define void @test_vsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv1f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg7_mask_nxv1f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv1f16: +define void @test_vsseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg8.nxv1f16(,,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg8.mask.nxv1f16(,,,,,,,, ptr, , i32) -define void @test_vsseg8_nxv1f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv1f16: +define void @test_vsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg8_mask_nxv1f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv1f16: +define void @test_vsseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg2.nxv1f32(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv1f32(,, ptr, , i32) -define void @test_vsseg2_nxv1f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv1f32: +define void @test_vsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv1f32( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg2_mask_nxv1f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv1f32: +define void @test_vsseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv1f32( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg3.nxv1f32(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv1f32(,,, ptr, , i32) -define void @test_vsseg3_nxv1f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv1f32: +define void @test_vsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv1f32( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg3_mask_nxv1f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv1f32: +define void 
@test_vsseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv1f32( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg4.nxv1f32(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv1f32(,,,, ptr, , i32) -define void @test_vsseg4_nxv1f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv1f32: +define void @test_vsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg5e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv1f32( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg4_mask_nxv1f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv1f32: +define void @test_vsseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv1f32( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg5.nxv1f32(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv1f32(,,,,, ptr, , i32) -define void @test_vsseg5_nxv1f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv1f32: +define void @test_vsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg6e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv1f32( %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void 
@llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg5_mask_nxv1f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv1f32: +define void @test_vsseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv1f32( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg6.nxv1f32(,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg6.mask.nxv1f32(,,,,,, ptr, , i32) -define void @test_vsseg6_nxv1f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg6_nxv1f32: +define void @test_vsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg7e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv1f32( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg6_mask_nxv1f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv1f32: +define void @test_vsseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv1f32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg7.nxv1f32(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv1f32(,,,,,,, ptr, , i32) -define void @test_vsseg7_nxv1f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv1f32: +define void @test_vsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg8e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv1f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 6) ret void } -define void @test_vsseg7_mask_nxv1f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv1f32: +define void @test_vsseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsseg8.nxv1f32(,,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg8.mask.nxv1f32(,,,,,,,, ptr, , i32) -define void @test_vsseg8_nxv1f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv1f32: +define void @test_vsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg8_mask_nxv1f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv1f32: +define void @test_vsseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg2.nxv8f16(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv8f16(,, ptr, , i32) -define void @test_vsseg2_nxv8f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv8f16: +define void @test_vsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv8f16( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg2_mask_nxv8f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv8f16: +define void @test_vsseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv8f16( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg3.nxv8f16(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv8f16(,,, ptr, , i32) -define void @test_vsseg3_nxv8f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv8f16: +define void @test_vsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv8f16( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg3_mask_nxv8f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv8f16: +define void @test_vsseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsseg3.mask.nxv8f16( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg4.nxv8f16(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv8f16(,,,, ptr, , i32) -define void @test_vsseg4_nxv8f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv8f16: +define void @test_vsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv8f16( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg4_mask_nxv8f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv8f16: +define void @test_vsseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv8f16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg2.nxv8f32(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv8f32(,, ptr, , i32) -define void @test_vsseg2_nxv8f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv8f32: +define void @test_vsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv8f32( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg2_mask_nxv8f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv8f32: +define void @test_vsseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsseg2.mask.nxv8f32( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg2.nxv2f64(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv2f64(,, ptr, , i32) -define void @test_vsseg2_nxv2f64( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv2f64: +define void @test_vsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv2f64( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg2_mask_nxv2f64( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv2f64: +define void @test_vsseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv2f64( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg3.nxv2f64(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv2f64(,,, ptr, , i32) -define void @test_vsseg3_nxv2f64( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv2f64: +define void @test_vsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg3e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv2f64( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg3_mask_nxv2f64( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv2f64: +define void @test_vsseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsseg3.mask.nxv2f64( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg4.nxv2f64(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv2f64(,,,, ptr, , i32) -define void @test_vsseg4_nxv2f64( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv2f64: +define void @test_vsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg4e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv2f64( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg4_mask_nxv2f64( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv2f64: +define void @test_vsseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv2f64( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg2.nxv4f16(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv4f16(,, ptr, , i32) -define void @test_vsseg2_nxv4f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv4f16: +define void @test_vsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv4f16( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg2_mask_nxv4f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv4f16: +define void @test_vsseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: 
vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv4f16( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg3.nxv4f16(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv4f16(,,, ptr, , i32) -define void @test_vsseg3_nxv4f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv4f16: +define void @test_vsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv4f16( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg3_mask_nxv4f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv4f16: +define void @test_vsseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv4f16( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg4.nxv4f16(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv4f16(,,,, ptr, , i32) -define void @test_vsseg4_nxv4f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv4f16: +define void @test_vsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv4f16( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg4_mask_nxv4f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv4f16: +define void @test_vsseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv4f16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg5.nxv4f16(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv4f16(,,,,, ptr, , i32) -define void @test_vsseg5_nxv4f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv4f16: +define void @test_vsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv4f16( %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg5_mask_nxv4f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv4f16: +define void @test_vsseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv4f16( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg6.nxv4f16(,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg6.mask.nxv4f16(,,,,,, ptr, , i32) -define void @test_vsseg6_nxv4f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg6_nxv4f16: +define void @test_vsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv4f16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg6_mask_nxv4f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv4f16: +define void @test_vsseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv4f16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg7.nxv4f16(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv4f16(,,,,,,, ptr, , i32) -define void @test_vsseg7_nxv4f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv4f16: +define void @test_vsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv4f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg7_mask_nxv4f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv4f16: +define void @test_vsseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg8.nxv4f16(,,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg8.mask.nxv4f16(,,,,,,,, ptr, , i32) -define void @test_vsseg8_nxv4f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv4f16: +define void @test_vsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; 
CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg8_mask_nxv4f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv4f16: +define void @test_vsseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg2.nxv2f16(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv2f16(,, ptr, , i32) -define void @test_vsseg2_nxv2f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv2f16: +define void @test_vsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv2f16( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg2_mask_nxv2f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv2f16: +define void @test_vsseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv2f16( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg3.nxv2f16(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv2f16(,,, ptr, , i32) -define void @test_vsseg3_nxv2f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv2f16: +define void @test_vsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; 
CHECK-LABEL: test_vsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv2f16( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg3_mask_nxv2f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv2f16: +define void @test_vsseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv2f16( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg4.nxv2f16(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv2f16(,,,, ptr, , i32) -define void @test_vsseg4_nxv2f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv2f16: +define void @test_vsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv2f16( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg4_mask_nxv2f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv2f16: +define void @test_vsseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv2f16( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg5.nxv2f16(,,,,, ptr , i32) -declare void @llvm.riscv.vsseg5.mask.nxv2f16(,,,,, ptr, , i32) -define void @test_vsseg5_nxv2f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg5_nxv2f16: +define void 
@test_vsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv2f16( %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg5_mask_nxv2f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv2f16: +define void @test_vsseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv2f16( %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg6.nxv2f16(,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg6.mask.nxv2f16(,,,,,, ptr, , i32) -define void @test_vsseg6_nxv2f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg6_nxv2f16: +define void @test_vsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv2f16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg6_mask_nxv2f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv2f16: +define void @test_vsseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv2f16( %val, %val, %val, 
%val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg7.nxv2f16(,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg7.mask.nxv2f16(,,,,,,, ptr, , i32) -define void @test_vsseg7_nxv2f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg7_nxv2f16: +define void @test_vsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv2f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg7_mask_nxv2f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv2f16: +define void @test_vsseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg8.nxv2f16(,,,,,,,, ptr , i32) -declare void @llvm.riscv.vsseg8.mask.nxv2f16(,,,,,,,, ptr, , i32) -define void @test_vsseg8_nxv2f16( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg8_nxv2f16: +define void @test_vsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg8_mask_nxv2f16( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv2f16: +define void @test_vsseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg2.nxv4f32(,, ptr , i32) -declare void @llvm.riscv.vsseg2.mask.nxv4f32(,, ptr, , i32) -define void @test_vsseg2_nxv4f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg2_nxv4f32: +define void @test_vsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv4f32( %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg2_mask_nxv4f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv4f32: +define void @test_vsseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv4f32( %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg3.nxv4f32(,,, ptr , i32) -declare void @llvm.riscv.vsseg3.mask.nxv4f32(,,, ptr, , i32) -define void @test_vsseg3_nxv4f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg3_nxv4f32: +define void @test_vsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv4f32( %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg3_mask_nxv4f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vsseg3_mask_nxv4f32: +define void @test_vsseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv4f32( %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsseg4.nxv4f32(,,,, ptr , i32) -declare void @llvm.riscv.vsseg4.mask.nxv4f32(,,,, ptr, , i32) -define void @test_vsseg4_nxv4f32( %val, ptr %base, i32 %vl) { -; CHECK-LABEL: test_vsseg4_nxv4f32: +define void @test_vsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { +; CHECK-LABEL: test_vsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv4f32( %val, %val, %val, %val, ptr %base, i32 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, i32 4) ret void } -define void @test_vsseg4_mask_nxv4f32( %val, ptr %base, %mask, i32 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv4f32: +define void @test_vsseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv4f32( %val, %val, %val, %val, ptr %base, %mask, i32 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i32 %vl, i32 4) ret void } + diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll index eb5bb19ec4f57..877eeeaf10039 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll @@ -1,4497 +1,3917 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh \ +; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsseg2.nxv16i16(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv16i16(,, ptr, , i64) +declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, 
i64) -define void @test_vsseg2_nxv16i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv16i16: +define void @test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv16i16( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg2_mask_nxv16i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv16i16: +define void @test_vsseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv16i16( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg2.nxv4i32(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv4i32(,, ptr, , i64) +declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -define void @test_vsseg2_nxv4i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv4i32: +define void @test_vsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv4i32( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg2_mask_nxv4i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv4i32: +define void @test_vsseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv4i32( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 
%vl, i64 3) ret void } -declare void @llvm.riscv.vsseg3.nxv4i32(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv4i32(,,, ptr, , i64) +declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -define void @test_vsseg3_nxv4i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv4i32: +define void @test_vsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv4i32( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg3_mask_nxv4i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv4i32: +define void @test_vsseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv4i32( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg4.nxv4i32(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv4i32(,,,, ptr, , i64) +declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -define void @test_vsseg4_nxv4i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv4i32: +define void @test_vsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv4i32( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg4_mask_nxv4i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv4i32: +define void @test_vsseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv4i32( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg2.nxv16i8(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv16i8(,, ptr, , i64) +declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -define void @test_vsseg2_nxv16i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv16i8: +define void @test_vsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv16i8( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg2_mask_nxv16i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv16i8: +define void @test_vsseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv16i8( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg3.nxv16i8(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv16i8(,,, ptr, , i64) +declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -define void @test_vsseg3_nxv16i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv16i8: +define void @test_vsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 3) + ret void +} + +define void 
@test_vsseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) + +define void @test_vsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv16i8( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg3_mask_nxv16i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv16i8: +define void @test_vsseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv16i8( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg4.nxv16i8(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv16i8(,,,, ptr, , i64) +declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -define void @test_vsseg4_nxv16i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv16i8: +define void @test_vsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv16i8( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg4_mask_nxv16i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv16i8: +define void @test_vsseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 
3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv16i8( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg2.nxv1i64(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv1i64(,, ptr, , i64) +declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -define void @test_vsseg2_nxv1i64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv1i64: +define void @test_vsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv1i64( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg2_mask_nxv1i64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv1i64: +define void @test_vsseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv1i64( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg3.nxv1i64(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv1i64(,,, ptr, , i64) +declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -define void @test_vsseg3_nxv1i64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv1i64: +define void @test_vsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg3e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg3e8.v 
v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv1i64( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg3_mask_nxv1i64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv1i64: +define void @test_vsseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv1i64( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg4.nxv1i64(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv1i64(,,,, ptr, , i64) +declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -define void @test_vsseg4_nxv1i64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv1i64: +define void @test_vsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg4e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv1i64( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg4_mask_nxv1i64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv1i64: +define void @test_vsseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv1i64( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg5.nxv1i64(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv1i64(,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare void 
@llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -define void @test_vsseg5_nxv1i64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv1i64: +define void @test_vsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg5e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv1i64( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg5_mask_nxv1i64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv1i64: +define void @test_vsseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg6.nxv1i64(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv1i64(,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -define void @test_vsseg6_nxv1i64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv1i64: +define void @test_vsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg6e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg6_mask_nxv1i64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv1i64: +define void @test_vsseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg7.nxv1i64(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv1i64(,,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -define void @test_vsseg7_nxv1i64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv1i64: +define void @test_vsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg7e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg7_mask_nxv1i64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv1i64: +define void @test_vsseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg8.nxv1i64(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv1i64(,,,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -define void @test_vsseg8_nxv1i64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv1i64: +define void @test_vsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 
4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg8e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg8_mask_nxv1i64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv1i64: +define void @test_vsseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg2.nxv1i32(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv1i32(,, ptr, , i64) +declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -define void @test_vsseg2_nxv1i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv1i32: +define void @test_vsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv1i32( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg2_mask_nxv1i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv1i32: +define void @test_vsseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv1i32( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg3.nxv1i32(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv1i32(,,, ptr, , i64) +declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -define void @test_vsseg3_nxv1i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv1i32: +define void @test_vsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv1i32( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg3_mask_nxv1i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv1i32: +define void @test_vsseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv1i32( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg4.nxv1i32(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv1i32(,,,, ptr, , i64) +declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -define void @test_vsseg4_nxv1i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv1i32: +define void @test_vsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv1i32( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg4_mask_nxv1i32( 
%val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv1i32: +define void @test_vsseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv1i32( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg5.nxv1i32(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv1i32(,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -define void @test_vsseg5_nxv1i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv1i32: +define void @test_vsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv1i32( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg5_mask_nxv1i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv1i32: +define void @test_vsseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg6.nxv1i32(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv1i32(,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -define void @test_vsseg6_nxv1i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv1i32: +define void 
@test_vsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg6_mask_nxv1i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv1i32: +define void @test_vsseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg7.nxv1i32(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv1i32(,,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -define void @test_vsseg7_nxv1i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv1i32: +define void @test_vsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg7_mask_nxv1i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv1i32: +define void @test_vsseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; 
CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg8.nxv1i32(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv1i32(,,,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -define void @test_vsseg8_nxv1i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv1i32: +define void @test_vsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg8_mask_nxv1i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv1i32: +define void @test_vsseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg2.nxv8i16(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv8i16(,, ptr, , i64) +declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -define void @test_vsseg2_nxv8i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv8i16: +define void @test_vsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, 
ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv8i16( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg2_mask_nxv8i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv8i16: +define void @test_vsseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv8i16( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg3.nxv8i16(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv8i16(,,, ptr, , i64) +declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -define void @test_vsseg3_nxv8i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv8i16: +define void @test_vsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv8i16( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg3_mask_nxv8i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv8i16: +define void @test_vsseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv8i16( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg4.nxv8i16(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv8i16(,,,, ptr, , 
i64) +declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -define void @test_vsseg4_nxv8i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv8i16: +define void @test_vsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv8i16( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg4_mask_nxv8i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv8i16: +define void @test_vsseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv8i16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg2.nxv4i8(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv4i8(,, ptr, , i64) +declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -define void @test_vsseg2_nxv4i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv4i8: +define void @test_vsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv4i8( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg2_mask_nxv4i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv4i8: +define void @test_vsseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: 
vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv4i8( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg3.nxv4i8(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv4i8(,,, ptr, , i64) +declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -define void @test_vsseg3_nxv4i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv4i8: +define void @test_vsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv4i8( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg3_mask_nxv4i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv4i8: +define void @test_vsseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv4i8( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg4.nxv4i8(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv4i8(,,,, ptr, , i64) +declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -define void @test_vsseg4_nxv4i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv4i8: +define void @test_vsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv4i8( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg4_mask_nxv4i8( %val, 
ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv4i8: +define void @test_vsseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv4i8( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg5.nxv4i8(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv4i8(,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -define void @test_vsseg5_nxv4i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv4i8: +define void @test_vsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg8e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv4i8( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg5_mask_nxv4i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv4i8: +define void @test_vsseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg6.nxv4i8(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv4i8(,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -define void @test_vsseg6_nxv4i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv4i8: +define void 
@test_vsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg8e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg6_mask_nxv4i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv4i8: +define void @test_vsseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg7.nxv4i8(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv4i8(,,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -define void @test_vsseg7_nxv4i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv4i8: +define void @test_vsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: vsseg8e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg7_mask_nxv4i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv4i8: +define void @test_vsseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; 
CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg8.nxv4i8(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv4i8(,,,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -define void @test_vsseg8_nxv4i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv4i8: +define void @test_vsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 3) ret void } -define void @test_vsseg8_mask_nxv4i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv4i8: +define void @test_vsseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsseg2.nxv1i16(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv1i16(,, ptr, , i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -define void @test_vsseg2_nxv1i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv1i16: +define void @test_vsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv1i16( %val, %val, ptr 
%base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg2_mask_nxv1i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv1i16: +define void @test_vsseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv1i16( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg3.nxv1i16(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv1i16(,,, ptr, , i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -define void @test_vsseg3_nxv1i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv1i16: +define void @test_vsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv1i16( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg3_mask_nxv1i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv1i16: +define void @test_vsseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv1i16( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg4.nxv1i16(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv1i16(,,,, ptr, , i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -define void @test_vsseg4_nxv1i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv1i16: +define void @test_vsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; 
CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv1i16( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg4_mask_nxv1i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv1i16: +define void @test_vsseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) + +define void @test_vsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) + +define void @test_vsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void 
@llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i1>, i64, i64)
+
+define void @test_vsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsseg3e16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
+ ret void
+}
+
+define void @test_vsseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vsseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
+ ret void
+}
+
+declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i1>, i64, i64)
+
+define void @test_vsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsseg3e16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
+ ret void
+}
+
+define void @test_vsseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vsseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
+ ret void
+}
+
+declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsseg3e16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
+ ret void
+}
+
+define void @test_vsseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vsseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
+ ret void
+}
+
+declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i1>, i64, i64)
+
+define void
@test_vsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) + +define void @test_vsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) + +define void @test_vsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) + +define void @test_vsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: 
test_vsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) + +define void @test_vsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) + +define void @test_vsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) + +define void @test_vsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; 
CHECK-NEXT: vsseg5e16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, i64 4)
+ ret void
+}
+
+define void @test_vsseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vsseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
+ ret void
+}
+
+declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsseg5e16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, i64 4)
+ ret void
+}
+
+define void @test_vsseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vsseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
+ ret void
+}
+
+declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i1>, i64, i64)
+
+define void @test_vsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsseg6e16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, i64 4)
+ ret void
+}
+
+define void @test_vsseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vsseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
+ ret void
+}
+
+declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i1>, i64, i64)
+
+define void @test_vsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsseg6e16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ tail call void
@llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) + +define void @test_vsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) + +define void @test_vsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) + +define void @test_vsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void 
@test_vsseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) + +define void @test_vsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) + +define void @test_vsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) + +define void @test_vsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) + +define void @test_vsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 4) + ret void +} + +define void @test_vsseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) + +define void @test_vsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 5) + ret void +} + +define void @test_vsseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv1i16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg5.nxv1i16(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv1i16(,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -define void @test_vsseg5_nxv1i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv1i16: +define void @test_vsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, 
m1, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv1i16( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg5_mask_nxv1i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv1i16: +define void @test_vsseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg6.nxv1i16(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv1i16(,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -define void @test_vsseg6_nxv1i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv1i16: +define void @test_vsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg6_mask_nxv1i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv1i16: +define void @test_vsseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg7.nxv1i16(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv1i16(,,,,,,, ptr, , 
i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -define void @test_vsseg7_nxv1i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv1i16: +define void @test_vsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg7_mask_nxv1i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv1i16: +define void @test_vsseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg8.nxv1i16(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv1i16(,,,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -define void @test_vsseg8_nxv1i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv1i16: +define void @test_vsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg8_mask_nxv1i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv1i16: +define void 
@test_vsseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg2.nxv2i32(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv2i32(,, ptr, , i64) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -define void @test_vsseg2_nxv2i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv2i32: +define void @test_vsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv2i32( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg2_mask_nxv2i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv2i32: +define void @test_vsseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv2i32( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg3.nxv2i32(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv2i32(,,, ptr, , i64) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -define void @test_vsseg3_nxv2i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv2i32: +define void @test_vsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv2i32( %val, %val, %val, ptr %base, i64 
%vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg3_mask_nxv2i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv2i32: +define void @test_vsseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv2i32( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg4.nxv2i32(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv2i32(,,,, ptr, , i64) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -define void @test_vsseg4_nxv2i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv2i32: +define void @test_vsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv2i32( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg4_mask_nxv2i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv2i32: +define void @test_vsseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv2i32( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg5.nxv2i32(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv2i32(,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -define void @test_vsseg5_nxv2i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv2i32: +define void @test_vsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv2i32( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg5_mask_nxv2i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv2i32: +define void @test_vsseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t +; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg6.nxv2i32(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv2i32(,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -define void @test_vsseg6_nxv2i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv2i32: +define void @test_vsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg6_mask_nxv2i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv2i32: +define void @test_vsseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void 
@llvm.riscv.vsseg7.nxv2i32(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv2i32(,,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -define void @test_vsseg7_nxv2i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv2i32: +define void @test_vsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg7_mask_nxv2i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv2i32: +define void @test_vsseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg8.nxv2i32(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv2i32(,,,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -define void @test_vsseg8_nxv2i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv2i32: +define void @test_vsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg8_mask_nxv2i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv2i32: +define void 
@test_vsseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg2.nxv8i8(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv8i8(,, ptr, , i64) +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -define void @test_vsseg2_nxv8i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv8i8: +define void @test_vsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg6e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv8i8( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg2_mask_nxv8i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv8i8: +define void @test_vsseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv8i8( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg3.nxv8i8(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv8i8(,,, ptr, , i64) +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -define void @test_vsseg3_nxv8i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv8i8: +define void @test_vsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg6e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call 
void @llvm.riscv.vsseg3.nxv8i8( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg3_mask_nxv8i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv8i8: +define void @test_vsseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv8i8( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg4.nxv8i8(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv8i8(,,,, ptr, , i64) +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -define void @test_vsseg4_nxv8i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv8i8: +define void @test_vsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv8i8( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg4_mask_nxv8i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv8i8: +define void @test_vsseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv8i8( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg5.nxv8i8(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv8i8(,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -define void @test_vsseg5_nxv8i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv8i8: +define void @test_vsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { 
+; CHECK-LABEL: test_vsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv8i8( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg5_mask_nxv8i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv8i8: +define void @test_vsseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg6.nxv8i8(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv8i8(,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -define void @test_vsseg6_nxv8i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv8i8: +define void @test_vsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg6_mask_nxv8i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv8i8: +define void @test_vsseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv8i8( %val, %val, %val, %val, %val, %val, 
ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg7.nxv8i8(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv8i8(,,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -define void @test_vsseg7_nxv8i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv8i8: +define void @test_vsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg7_mask_nxv8i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv8i8: +define void @test_vsseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg8.nxv8i8(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv8i8(,,,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -define void @test_vsseg8_nxv8i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv8i8: +define void @test_vsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void 
@llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, i64 6)
   ret void
 }
-define void @test_vsseg8_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsseg8_mask_nxv8i8:
+define void @test_vsseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vsseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsseg8e8.v v8, (a0), v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsseg8.mask.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
+  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
   ret void
 }
-declare void @llvm.riscv.vsseg2.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, ptr , i64)
-declare void @llvm.riscv.vsseg2.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, ptr, <vscale x 4 x i1>, i64)
+declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i1>, i64, i64)
-define void @test_vsseg2_nxv4i64(<vscale x 4 x i64> %val, ptr %base, i64 %vl) {
-; CHECK-LABEL: test_vsseg2_nxv4i64:
+define void @test_vsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsseg2.nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64> %val, ptr %base, i64 %vl)
+  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, i64 6)
   ret void
 }
-define void @test_vsseg2_mask_nxv4i64(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsseg2_mask_nxv4i64:
+define void @test_vsseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vsseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsseg2.mask.nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
+  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
   ret void
 }
-declare void @llvm.riscv.vsseg2.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i64)
-declare void @llvm.riscv.vsseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i64)
+declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i1>, i64, i64)
-define void @test_vsseg2_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
-; CHECK-LABEL: test_vsseg2_nxv4i16:
+define void @test_vsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:
vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv4i16( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg2_mask_nxv4i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv4i16: +define void @test_vsseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv4i16( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg3.nxv4i16(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv4i16(,,, ptr, , i64) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -define void @test_vsseg3_nxv4i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv4i16: +define void @test_vsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv4i16( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg3_mask_nxv4i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv4i16: +define void @test_vsseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv4i16( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg4.nxv4i16(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv4i16(,,,, ptr, , i64) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -define void @test_vsseg4_nxv4i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv4i16: 
+define void @test_vsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv4i16( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg4_mask_nxv4i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv4i16: +define void @test_vsseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv4i16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg5.nxv4i16(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv4i16(,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -define void @test_vsseg5_nxv4i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv4i16: +define void @test_vsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv4i16( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg5_mask_nxv4i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv4i16: +define void @test_vsseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv4i16( %val, %val, %val, %val, %val, 
ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg6.nxv4i16(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv4i16(,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -define void @test_vsseg6_nxv4i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv4i16: +define void @test_vsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg6_mask_nxv4i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv4i16: +define void @test_vsseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg7.nxv4i16(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv4i16(,,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -define void @test_vsseg7_nxv4i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv4i16: +define void @test_vsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg5e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 6) ret void 
} -define void @test_vsseg7_mask_nxv4i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv4i16: +define void @test_vsseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg8.nxv4i16(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv4i16(,,,,,,,, ptr, , i64) +declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -define void @test_vsseg8_nxv4i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv4i16: +define void @test_vsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg6e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg8_mask_nxv4i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv4i16: +define void @test_vsseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg2.nxv1i8(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv1i8(,, ptr, , i64) +declare void 
@llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -define void @test_vsseg2_nxv1i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv1i8: +define void @test_vsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg7e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv1i8( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg2_mask_nxv1i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv1i8: +define void @test_vsseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv1i8( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg3.nxv1i8(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv1i8(,,, ptr, , i64) +declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -define void @test_vsseg3_nxv1i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv1i8: +define void @test_vsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg8e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv1i8( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg3_mask_nxv1i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv1i8: +define void @test_vsseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv1i8( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg4.nxv1i8(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv1i8(,,,, ptr, , i64) -define void @test_vsseg4_nxv1i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv1i8: +define void @test_vsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv1i8( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg4_mask_nxv1i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv1i8: +define void @test_vsseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv1i8( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg5.nxv1i8(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv1i8(,,,,, ptr, , i64) -define void @test_vsseg5_nxv1i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv1i8: +define void @test_vsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv1i8( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg5_mask_nxv1i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv1i8: +define void @test_vsseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: 
vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg6.nxv1i8(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv1i8(,,,,,, ptr, , i64) -define void @test_vsseg6_nxv1i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv1i8: +define void @test_vsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg6_mask_nxv1i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv1i8: +define void @test_vsseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg7.nxv1i8(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv1i8(,,,,,,, ptr, , i64) -define void @test_vsseg7_nxv1i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv1i8: +define void @test_vsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg7_mask_nxv1i8( %val, ptr 
%base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv1i8: +define void @test_vsseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg8.nxv1i8(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv1i8(,,,,,,,, ptr, , i64) -define void @test_vsseg8_nxv1i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv1i8: +define void @test_vsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg8_mask_nxv1i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv1i8: +define void @test_vsseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg2.nxv2i8(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv2i8(,, ptr, , i64) -define void @test_vsseg2_nxv2i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv2i8: +define void @test_vsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: 
test_vsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv2i8( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg2_mask_nxv2i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv2i8: +define void @test_vsseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv2i8( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg3.nxv2i8(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv2i8(,,, ptr, , i64) -define void @test_vsseg3_nxv2i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv2i8: +define void @test_vsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv2i8( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg3_mask_nxv2i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv2i8: +define void @test_vsseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv2i8( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg4.nxv2i8(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv2i8(,,,, ptr, , i64) -define void @test_vsseg4_nxv2i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv2i8: +define void @test_vsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: 
test_vsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv2i8( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg4_mask_nxv2i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv2i8: +define void @test_vsseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv2i8( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg5.nxv2i8(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv2i8(,,,,, ptr, , i64) -define void @test_vsseg5_nxv2i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv2i8: +define void @test_vsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv2i8( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg5_mask_nxv2i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv2i8: +define void @test_vsseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg6.nxv2i8(,,,,,, ptr , i64) -declare void 
@llvm.riscv.vsseg6.mask.nxv2i8(,,,,,, ptr, , i64) -define void @test_vsseg6_nxv2i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv2i8: +define void @test_vsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg6_mask_nxv2i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv2i8: +define void @test_vsseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg7.nxv2i8(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv2i8(,,,,,,, ptr, , i64) -define void @test_vsseg7_nxv2i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv2i8: +define void @test_vsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg7_mask_nxv2i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv2i8: +define void @test_vsseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, 
v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg8.nxv2i8(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv2i8(,,,,,,,, ptr, , i64) -define void @test_vsseg8_nxv2i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv2i8: +define void @test_vsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg8_mask_nxv2i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv2i8: +define void @test_vsseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg2.nxv8i32(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv8i32(,, ptr, , i64) -define void @test_vsseg2_nxv8i32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv8i32: +define void @test_vsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv8i32( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 4) ret void } 
-define void @test_vsseg2_mask_nxv8i32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv8i32: +define void @test_vsseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv8i32( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg2.nxv32i8(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv32i8(,, ptr, , i64) -define void @test_vsseg2_nxv32i8( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv32i8: +define void @test_vsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv32i8( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg2_mask_nxv32i8( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv32i8: +define void @test_vsseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv32i8( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg2.nxv2i16(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv2i16(,, ptr, , i64) -define void @test_vsseg2_nxv2i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv2i16: +define void @test_vsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv2i16( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg2_mask_nxv2i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv2i16: +define void 
@test_vsseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv2i16( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg3.nxv2i16(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv2i16(,,, ptr, , i64) -define void @test_vsseg3_nxv2i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv2i16: +define void @test_vsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv2i16( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg3_mask_nxv2i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv2i16: +define void @test_vsseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv2i16( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg4.nxv2i16(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv2i16(,,,, ptr, , i64) -define void @test_vsseg4_nxv2i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv2i16: +define void @test_vsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv2i16( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg4_mask_nxv2i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv2i16: +define 
void @test_vsseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv2i16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg5.nxv2i16(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv2i16(,,,,, ptr, , i64) -define void @test_vsseg5_nxv2i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv2i16: +define void @test_vsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv2i16( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg5_mask_nxv2i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv2i16: +define void @test_vsseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg6.nxv2i16(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv2i16(,,,,,, ptr, , i64) -define void @test_vsseg6_nxv2i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv2i16: +define void @test_vsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void 
@llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg6_mask_nxv2i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv2i16: +define void @test_vsseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg7.nxv2i16(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv2i16(,,,,,,, ptr, , i64) -define void @test_vsseg7_nxv2i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv2i16: +define void @test_vsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg7_mask_nxv2i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv2i16: +define void @test_vsseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg8.nxv2i16(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv2i16(,,,,,,,, ptr, , i64) -define void @test_vsseg8_nxv2i16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv2i16: +define void @test_vsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg8_mask_nxv2i16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv2i16: +define void @test_vsseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg2.nxv2i64(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv2i64(,, ptr, , i64) -define void @test_vsseg2_nxv2i64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv2i64: +define void @test_vsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv2i64( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg2_mask_nxv2i64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv2i64: +define void @test_vsseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv2i64( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg3.nxv2i64(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv2i64(,,, ptr, , i64) -define void @test_vsseg3_nxv2i64( %val, ptr %base, i64 %vl) { -; 
CHECK-LABEL: test_vsseg3_nxv2i64: +define void @test_vsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg3e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv2i64( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg3_mask_nxv2i64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv2i64: +define void @test_vsseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv2i64( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg4.nxv2i64(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv2i64(,,,, ptr, , i64) -define void @test_vsseg4_nxv2i64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv2i64: +define void @test_vsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg4e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv2i64( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg4_mask_nxv2i64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv2i64: +define void @test_vsseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv2i64( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg2.nxv16f16(,, ptr , 
i64) -declare void @llvm.riscv.vsseg2.mask.nxv16f16(,, ptr, , i64) -define void @test_vsseg2_nxv16f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv16f16: +define void @test_vsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv16f16( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg2_mask_nxv16f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv16f16: +define void @test_vsseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv16f16( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg2.nxv4f64(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv4f64(,, ptr, , i64) -define void @test_vsseg2_nxv4f64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv4f64: +define void @test_vsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv4f64( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg2_mask_nxv4f64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv4f64: +define void @test_vsseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv4f64( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg2.nxv1f64(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv1f64(,, ptr, , i64) -define void 
@test_vsseg2_nxv1f64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv1f64: +define void @test_vsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv1f64( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg2_mask_nxv1f64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv1f64: +define void @test_vsseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv1f64( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg3.nxv1f64(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv1f64(,,, ptr, , i64) -define void @test_vsseg3_nxv1f64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv1f64: +define void @test_vsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg3e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv1f64( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg3_mask_nxv1f64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv1f64: +define void @test_vsseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv1f64( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg4.nxv1f64(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv1f64(,,,, ptr, , i64) -define void 
@test_vsseg4_nxv1f64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv1f64: +define void @test_vsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg4e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv1f64( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg4_mask_nxv1f64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv1f64: +define void @test_vsseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv1f64( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg5.nxv1f64(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv1f64(,,,,, ptr, , i64) -define void @test_vsseg5_nxv1f64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv1f64: +define void @test_vsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg5e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv1f64( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg5_mask_nxv1f64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv1f64: +define void @test_vsseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv1f64( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call 
void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg6.nxv1f64(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv1f64(,,,,,, ptr, , i64) -define void @test_vsseg6_nxv1f64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv1f64: +define void @test_vsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg6e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv1f64( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg6_mask_nxv1f64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv1f64: +define void @test_vsseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv1f64( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg7.nxv1f64(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv1f64(,,,,,,, ptr, , i64) -define void @test_vsseg7_nxv1f64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv1f64: +define void @test_vsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg7e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv1f64( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg7_mask_nxv1f64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv1f64: +define void @test_vsseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg8.nxv1f64(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv1f64(,,,,,,,, ptr, , i64) -define void @test_vsseg8_nxv1f64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv1f64: +define void @test_vsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg8e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg8_mask_nxv1f64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv1f64: +define void @test_vsseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg2.nxv2f32(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv2f32(,, ptr, , i64) -define void @test_vsseg2_nxv2f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv2f32: +define void @test_vsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: - 
tail call void @llvm.riscv.vsseg2.nxv2f32( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg2_mask_nxv2f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv2f32: +define void @test_vsseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv2f32( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg3.nxv2f32(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv2f32(,,, ptr, , i64) -define void @test_vsseg3_nxv2f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv2f32: +define void @test_vsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv2f32( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg3_mask_nxv2f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv2f32: +define void @test_vsseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv2f32( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg4.nxv2f32(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv2f32(,,,, ptr, , i64) -define void @test_vsseg4_nxv2f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv2f32: +define void @test_vsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail 
call void @llvm.riscv.vsseg4.nxv2f32( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg4_mask_nxv2f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv2f32: +define void @test_vsseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv2f32( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg5.nxv2f32(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv2f32(,,,,, ptr, , i64) -define void @test_vsseg5_nxv2f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv2f32: +define void @test_vsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv2f32( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg5_mask_nxv2f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv2f32: +define void @test_vsseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv2f32( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 5) + ret void +} + + +define void @test_vsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 5) + ret void +} + +define void @test_vsseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vsseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg6.nxv2f32(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv2f32(,,,,,, ptr, , i64) -define void @test_vsseg6_nxv2f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv2f32: +define void @test_vsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv2f32( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg6_mask_nxv2f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv2f32: +define void @test_vsseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv2f32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg7.nxv2f32(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv2f32(,,,,,,, ptr, , i64) -define void @test_vsseg7_nxv2f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv2f32: +define void @test_vsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv2f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg7_mask_nxv2f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv2f32: +define void @test_vsseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg8.nxv2f32(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv2f32(,,,,,,,, ptr, , i64) -define void @test_vsseg8_nxv2f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv2f32: +define void @test_vsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg8_mask_nxv2f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv2f32: +define void @test_vsseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg2.nxv1f16(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv1f16(,, ptr, , i64) -define void @test_vsseg2_nxv1f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv1f16: +define void @test_vsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv1f16( %val, %val, ptr %base, i64 %vl) + tail call void 
@llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg2_mask_nxv1f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv1f16: +define void @test_vsseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv1f16( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg3.nxv1f16(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv1f16(,,, ptr, , i64) -define void @test_vsseg3_nxv1f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv1f16: +define void @test_vsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv1f16( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 5) ret void } -define void @test_vsseg3_mask_nxv1f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv1f16: +define void @test_vsseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv1f16( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsseg4.nxv1f16(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv1f16(,,,, ptr, , i64) -define void @test_vsseg4_nxv1f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv1f16: +define void @test_vsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv1f16( %val, 
%val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg4_mask_nxv1f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +define void @test_vsseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv1f16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg5.nxv1f16(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv1f16(,,,,, ptr, , i64) -define void @test_vsseg5_nxv1f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv1f16: +define void @test_vsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv1f16( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg5_mask_nxv1f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv1f16: +define void @test_vsseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv1f16( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg6.nxv1f16(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv1f16(,,,,,, ptr, , i64) -define void @test_vsseg6_nxv1f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv1f16: +define void @test_vsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv1f16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg6_mask_nxv1f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv1f16: +define void @test_vsseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv1f16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg7.nxv1f16(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv1f16(,,,,,,, ptr, , i64) -define void @test_vsseg7_nxv1f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv1f16: +define void @test_vsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv1f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg7_mask_nxv1f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv1f16: +define void @test_vsseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg8.nxv1f16(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv1f16(,,,,,,,, ptr, , i64) -define void @test_vsseg8_nxv1f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv1f16: +define void @test_vsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg8_mask_nxv1f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv1f16: +define void @test_vsseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg2.nxv1f32(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv1f32(,, ptr, , i64) -define void @test_vsseg2_nxv1f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv1f32: +define void @test_vsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv1f32( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg2_mask_nxv1f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv1f32: +define void @test_vsseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv1f32( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg3.nxv1f32(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv1f32(,,, ptr, , i64) -define void @test_vsseg3_nxv1f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv1f32: +define void @test_vsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv1f32( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg3_mask_nxv1f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv1f32: +define void @test_vsseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv1f32( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg4.nxv1f32(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv1f32(,,,, ptr, , i64) -define void @test_vsseg4_nxv1f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv1f32: +define void @test_vsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg5e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv1f32( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg4_mask_nxv1f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv1f32: +define void @test_vsseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv1f32( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg5.nxv1f32(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv1f32(,,,,, ptr, , i64) -define void @test_vsseg5_nxv1f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv1f32: +define void @test_vsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg6e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv1f32( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg5_mask_nxv1f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv1f32: +define void @test_vsseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv1f32( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg6.nxv1f32(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv1f32(,,,,,, ptr, , i64) -define void @test_vsseg6_nxv1f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv1f32: +define void @test_vsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg7e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv1f32( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void 
@llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg6_mask_nxv1f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv1f32: +define void @test_vsseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv1f32( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg7.nxv1f32(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv1f32(,,,,,,, ptr, , i64) -define void @test_vsseg7_nxv1f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv1f32: +define void @test_vsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg8e64.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv1f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 6) ret void } -define void @test_vsseg7_mask_nxv1f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv1f32: +define void @test_vsseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsseg8.nxv1f32(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv1f32(,,,,,,,, ptr, , i64) -define void @test_vsseg8_nxv1f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv1f32: +define void @test_vsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) 
%val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg8_mask_nxv1f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv1f32: +define void @test_vsseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg2.nxv8f16(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv8f16(,, ptr, , i64) -define void @test_vsseg2_nxv8f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv8f16: +define void @test_vsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv8f16( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg2_mask_nxv8f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv8f16: +define void @test_vsseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv8f16( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void 
@llvm.riscv.vsseg3.nxv8f16(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv8f16(,,, ptr, , i64) -define void @test_vsseg3_nxv8f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv8f16: +define void @test_vsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv8f16( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg3_mask_nxv8f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv8f16: +define void @test_vsseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv8f16( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg4.nxv8f16(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv8f16(,,,, ptr, , i64) -define void @test_vsseg4_nxv8f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv8f16: +define void @test_vsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv8f16( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg4_mask_nxv8f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv8f16: +define void @test_vsseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv8f16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, 
i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg2.nxv8f32(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv8f32(,, ptr, , i64) -define void @test_vsseg2_nxv8f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv8f32: +define void @test_vsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv8f32( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg2_mask_nxv8f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv8f32: +define void @test_vsseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv8f32( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg2.nxv2f64(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv2f64(,, ptr, , i64) -define void @test_vsseg2_nxv2f64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv2f64: +define void @test_vsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv2f64( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg2_mask_nxv2f64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv2f64: +define void @test_vsseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv2f64( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg3.nxv2f64(,,, ptr , 
i64) -declare void @llvm.riscv.vsseg3.mask.nxv2f64(,,, ptr, , i64) -define void @test_vsseg3_nxv2f64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv2f64: +define void @test_vsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg3e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv2f64( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg3_mask_nxv2f64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv2f64: +define void @test_vsseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv2f64( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg4.nxv2f64(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv2f64(,,,, ptr, , i64) -define void @test_vsseg4_nxv2f64( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv2f64: +define void @test_vsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg4e64.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv2f64( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg4_mask_nxv2f64( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv2f64: +define void @test_vsseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv2f64( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg2.nxv4f16(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv4f16(,, ptr, , i64) -define void @test_vsseg2_nxv4f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv4f16: +define void @test_vsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv4f16( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg2_mask_nxv4f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv4f16: +define void @test_vsseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv4f16( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg3.nxv4f16(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv4f16(,,, ptr, , i64) -define void @test_vsseg3_nxv4f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv4f16: +define void @test_vsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv4f16( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg3_mask_nxv4f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv4f16: +define void @test_vsseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv4f16( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg4.nxv4f16(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv4f16(,,,, ptr, , i64) -define void @test_vsseg4_nxv4f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv4f16: +define void @test_vsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv4f16( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg4_mask_nxv4f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv4f16: +define void @test_vsseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv4f16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg5.nxv4f16(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv4f16(,,,,, ptr, , i64) -define void @test_vsseg5_nxv4f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv4f16: +define void @test_vsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv4f16( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg5_mask_nxv4f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv4f16: +define void @test_vsseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsseg5.mask.nxv4f16( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg6.nxv4f16(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv4f16(,,,,,, ptr, , i64) -define void @test_vsseg6_nxv4f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv4f16: +define void @test_vsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv4f16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg6_mask_nxv4f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv4f16: +define void @test_vsseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv4f16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg7.nxv4f16(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv4f16(,,,,,,, ptr, , i64) -define void @test_vsseg7_nxv4f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv4f16: +define void @test_vsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv4f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg7_mask_nxv4f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv4f16: +define void 
@test_vsseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg8.nxv4f16(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv4f16(,,,,,,,, ptr, , i64) -define void @test_vsseg8_nxv4f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv4f16: +define void @test_vsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg8_mask_nxv4f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv4f16: +define void @test_vsseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg2.nxv2f16(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv2f16(,, ptr, , i64) -define void @test_vsseg2_nxv2f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv2f16: +define void @test_vsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, 
v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv2f16( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg2_mask_nxv2f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv2f16: +define void @test_vsseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv2f16( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg3.nxv2f16(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv2f16(,,, ptr, , i64) -define void @test_vsseg3_nxv2f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv2f16: +define void @test_vsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv2f16( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg3_mask_nxv2f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv2f16: +define void @test_vsseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv2f16( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg4.nxv2f16(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv2f16(,,,, ptr, , i64) -define void @test_vsseg4_nxv2f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv2f16: +define void @test_vsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv2f16( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg4_mask_nxv2f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv2f16: +define void @test_vsseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv2f16( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg5.nxv2f16(,,,,, ptr , i64) -declare void @llvm.riscv.vsseg5.mask.nxv2f16(,,,,, ptr, , i64) -define void @test_vsseg5_nxv2f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg5_nxv2f16: +define void @test_vsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.nxv2f16( %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg5_mask_nxv2f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg5_mask_nxv2f16: +define void @test_vsseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg5.mask.nxv2f16( %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg6.nxv2f16(,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg6.mask.nxv2f16(,,,,,, ptr, , i64) -define void @test_vsseg6_nxv2f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg6_nxv2f16: +define void 
@test_vsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.nxv2f16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg6_mask_nxv2f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg6_mask_nxv2f16: +define void @test_vsseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg6.mask.nxv2f16( %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg7.nxv2f16(,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg7.mask.nxv2f16(,,,,,,, ptr, , i64) -define void @test_vsseg7_nxv2f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg7_nxv2f16: +define void @test_vsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.nxv2f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg7_mask_nxv2f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg7_mask_nxv2f16: +define void @test_vsseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg7.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, 
%mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg8.nxv2f16(,,,,,,,, ptr , i64) -declare void @llvm.riscv.vsseg8.mask.nxv2f16(,,,,,,,, ptr, , i64) -define void @test_vsseg8_nxv2f16( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg8_nxv2f16: +define void @test_vsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg8_mask_nxv2f16( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg8_mask_nxv2f16: +define void @test_vsseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg8.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg2.nxv4f32(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv4f32(,, ptr, , i64) -define void @test_vsseg2_nxv4f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg2_nxv4f32: +define void @test_vsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv4f32( %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg2_mask_nxv4f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg2_mask_nxv4f32: +define void @test_vsseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.mask.nxv4f32( %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg3.nxv4f32(,,, ptr , i64) -declare void @llvm.riscv.vsseg3.mask.nxv4f32(,,, ptr, , i64) -define void @test_vsseg3_nxv4f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg3_nxv4f32: +define void @test_vsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.nxv4f32( %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg3_mask_nxv4f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg3_mask_nxv4f32: +define void @test_vsseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg3.mask.nxv4f32( %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsseg4.nxv4f32(,,,, ptr , i64) -declare void @llvm.riscv.vsseg4.mask.nxv4f32(,,,, ptr, , i64) -define void @test_vsseg4_nxv4f32( %val, ptr %base, i64 %vl) { -; CHECK-LABEL: test_vsseg4_nxv4f32: +define void @test_vsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.nxv4f32( %val, %val, %val, %val, ptr %base, i64 %vl) + tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, i64 4) ret void } -define void @test_vsseg4_mask_nxv4f32( %val, ptr %base, %mask, i64 %vl) { -; CHECK-LABEL: test_vsseg4_mask_nxv4f32: +define void 
@test_vsseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg4.mask.nxv4f32( %val, %val, %val, %val, ptr %base, %mask, i64 %vl) + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %mask, i64 %vl, i64 4) ret void } + diff --git a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll index e337653bd0c24..df443d6f4d93a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll @@ -2,4151 +2,3916 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vssseg2.nxv16i16(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv16i16(,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) -define void @test_vssseg2_nxv16i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv16i16: +define void @test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv16i16( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg2_mask_nxv16i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv16i16: +define void @test_vssseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv16i16( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg2.nxv1i8(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv1i8(,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) +declare void 
@llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) -define void @test_vssseg2_nxv1i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv1i8: +define void @test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv1i8( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg2_mask_nxv1i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv1i8: +define void @test_vssseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv1i8( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg3.nxv1i8(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv1i8(,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) -define void @test_vssseg3_nxv1i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv1i8: +define void @test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv1i8( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg3_mask_nxv1i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv1i8: +define void @test_vssseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; 
CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv1i8( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg4.nxv1i8(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv1i8(,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) -define void @test_vssseg4_nxv1i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv1i8: +define void @test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv1i8( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg4_mask_nxv1i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv1i8: +define void @test_vssseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv1i8( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg5.nxv1i8(,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg5.mask.nxv1i8(,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) -define void @test_vssseg5_nxv1i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv1i8: +define void @test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, 
e8, mf8, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv1i8( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg5_mask_nxv1i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv1i8: +define void @test_vssseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv1i8( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg6.nxv1i8(,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg6.mask.nxv1i8(,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) -define void @test_vssseg6_nxv1i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv1i8: +define void @test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg6_mask_nxv1i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv1i8: +define void @test_vssseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg7.nxv1i8(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv1i8(,,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) -define void @test_vssseg7_nxv1i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv1i8: +define void @test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg7_mask_nxv1i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv1i8: +define void @test_vssseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg8.nxv1i8(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv1i8(,,,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) -define void @test_vssseg8_nxv1i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv1i8: +define void @test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; 
CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg8_mask_nxv1i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv1i8: +define void @test_vssseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg2.nxv16i8(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv16i8(,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) -define void @test_vssseg2_nxv16i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv16i8: +define void @test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv16i8( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg2_mask_nxv16i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv16i8: +define void @test_vssseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv16i8( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg3.nxv16i8(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv16i8(,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) -define void @test_vssseg3_nxv16i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv16i8: +define void @test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 3) + ret void +} + +define void @test_vssseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) + ret void +} + +declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) + +define void @test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv16i8( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg3_mask_nxv16i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv16i8: +define void @test_vssseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv16i8( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void 
@llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
+ ret void
+}
+
+declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, i32, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, i32, <vscale x 1 x i1>, i32, i32)
+
+define void @test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT: vssseg4e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
+ ret void
+}
+
+define void @test_vssseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
+ ret void
+}
+
+declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, i32, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, i32, <vscale x 2 x i1>, i32, i32)
+
+define void @test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
+; CHECK-NEXT: vssseg4e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
+ ret void
+}
+
+define void @test_vssseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
+; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
+ ret void
+}
+
+declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, i32, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, i32, <vscale x 4 x i1>, i32, i32)
+
+define void @test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
+; CHECK-NEXT: vssseg4e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl,
i32 3) + ret void +} + +define void @test_vssseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) + ret void +} + +declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) + +define void @test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 3) + ret void +} + +define void @test_vssseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg4.nxv16i8(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv16i8(,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) -define void @test_vssseg4_nxv16i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv16i8: +define void @test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv16i8( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg4_mask_nxv16i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv16i8: +define void @test_vssseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; 
CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv16i8( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg2.nxv2i32(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv2i32(,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) -define void @test_vssseg2_nxv2i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv2i32: +define void @test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv2i32( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg2_mask_nxv2i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv2i32: +define void @test_vssseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv2i32( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg3.nxv2i32(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv2i32(,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) -define void @test_vssseg3_nxv2i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv2i32: +define void @test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - 
tail call void @llvm.riscv.vssseg3.nxv2i32( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg3_mask_nxv2i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv2i32: +define void @test_vssseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv2i32( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg4.nxv2i32(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv2i32(,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) -define void @test_vssseg4_nxv2i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv2i32: +define void @test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv2i32( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg4_mask_nxv2i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv2i32: +define void @test_vssseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv2i32( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg5.nxv2i32(,,,,, ptr, i32, i32) -declare void 
@llvm.riscv.vssseg5.mask.nxv2i32(,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) -define void @test_vssseg5_nxv2i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv2i32: +define void @test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv2i32( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg5_mask_nxv2i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv2i32: +define void @test_vssseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv2i32( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg6.nxv2i32(,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg6.mask.nxv2i32(,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) -define void @test_vssseg6_nxv2i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv2i32: +define void @test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void 
@llvm.riscv.vssseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg6_mask_nxv2i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv2i32: +define void @test_vssseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg7.nxv2i32(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv2i32(,,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) -define void @test_vssseg7_nxv2i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv2i32: +define void @test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg7_mask_nxv2i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv2i32: +define void @test_vssseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void 
@llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg8.nxv2i32(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv2i32(,,,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) -define void @test_vssseg8_nxv2i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv2i32: +define void @test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg8_mask_nxv2i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv2i32: +define void @test_vssseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg2.nxv4i16(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv4i16(,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) -define void @test_vssseg2_nxv4i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv4i16: +define void @test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: 
vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv4i16( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg2_mask_nxv4i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv4i16: +define void @test_vssseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv4i16( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg3.nxv4i16(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv4i16(,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) -define void @test_vssseg3_nxv4i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv4i16: +define void @test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv4i16( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg3_mask_nxv4i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv4i16: +define void @test_vssseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv4i16( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg4.nxv4i16(,,,, ptr, i32, i32) 
-declare void @llvm.riscv.vssseg4.mask.nxv4i16(,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) -define void @test_vssseg4_nxv4i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv4i16: +define void @test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv4i16( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg4_mask_nxv4i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv4i16: +define void @test_vssseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv4i16( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg5.nxv4i16(,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg5.mask.nxv4i16(,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) -define void @test_vssseg5_nxv4i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv4i16: +define void @test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv4i16( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void 
@test_vssseg5_mask_nxv4i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv4i16: +define void @test_vssseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv4i16( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vssseg6.nxv4i16(,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg6.mask.nxv4i16(,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) -define void @test_vssseg6_nxv4i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv4i16: +define void @test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 3) ret void } -define void @test_vssseg6_mask_nxv4i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv4i16: +define void @test_vssseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) + ret void +} + +declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, 
i32)
+
+define void @test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT:    vssseg8e8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT:    vssseg8e8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, i32, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, i32, <vscale x 2 x i1>, i32, i32)
+
+define void @test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
+; CHECK-NEXT:    vssseg8e8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vssseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
+; CHECK-NEXT:    vssseg8e8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i32, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i32, <vscale x 4 x i1>, i32, i32)
+
+define void @test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
+; CHECK-NEXT:    vssseg8e8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vssseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
+; CHECK-NEXT:    vssseg8e8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void 
@llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) + ret void +} + +declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) + +define void @test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 3) + ret void +} + +define void @test_vssseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 3) + ret void +} + +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) + +define void @test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) + +define void @test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vssseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) + +define void @test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) + +define void @test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) + +define void @test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void 
@test_vssseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) + +define void @test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) + +define void @test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) + +define void @test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) + +define void @test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) + +define void @test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) + +define void @test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; 
CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) + +define void @test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) + +define void @test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) + +define void @test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) + +define void @test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) + +define void @test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) + +define void @test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, 
i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) + +define void @test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) + +define void @test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) + +define void 
@test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) + +define void @test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) + +define void @test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void 
@llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) + +define void @test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) + +define void @test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) + +define void @test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) + ret void +} + +define void @test_vssseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, 
ptr %base, i32 %offset, %mask, i32 %vl, i32 4) + ret void +} + +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) + +define void @test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) + ret void +} + +define void @test_vssseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg7.nxv4i16(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv4i16(,,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) -define void @test_vssseg7_nxv4i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv4i16: +define void @test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg7_mask_nxv4i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv4i16: +define void @test_vssseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv4i16( %val, %val, %val, %val, %val, 
%val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg8.nxv4i16(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv4i16(,,,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) -define void @test_vssseg8_nxv4i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv4i16: +define void @test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg8_mask_nxv4i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv4i16: +define void @test_vssseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg2.nxv1i32(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv1i32(,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) -define void @test_vssseg2_nxv1i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv1i32: +define void @test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv1i32( 
%val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg2_mask_nxv1i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv1i32: +define void @test_vssseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv1i32( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg3.nxv1i32(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv1i32(,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) -define void @test_vssseg3_nxv1i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv1i32: +define void @test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv1i32( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg3_mask_nxv1i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv1i32: +define void @test_vssseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv1i32( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg4.nxv1i32(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv1i32(,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) -define void @test_vssseg4_nxv1i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv1i32: +define void @test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv1i32( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg4_mask_nxv1i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv1i32: +define void @test_vssseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv1i32( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg5.nxv1i32(,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg5.mask.nxv1i32(,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) -define void @test_vssseg5_nxv1i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv1i32: +define void @test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv1i32( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg5_mask_nxv1i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv1i32: +define void @test_vssseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vssseg5.mask.nxv1i32( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg6.nxv1i32(,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg6.mask.nxv1i32(,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) -define void @test_vssseg6_nxv1i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv1i32: +define void @test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg6_mask_nxv1i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv1i32: +define void @test_vssseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg7.nxv1i32(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv1i32(,,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) -define void @test_vssseg7_nxv1i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv1i32: +define void @test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv1i32( 
%val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg7_mask_nxv1i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv1i32: +define void @test_vssseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg8.nxv1i32(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv1i32(,,,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) -define void @test_vssseg8_nxv1i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv1i32: +define void @test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg8_mask_nxv1i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv1i32: +define void @test_vssseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv1i32( %val, %val, %val, 
%val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg2.nxv8i16(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv8i16(,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) -define void @test_vssseg2_nxv8i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv8i16: +define void @test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv8i16( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg2_mask_nxv8i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv8i16: +define void @test_vssseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv8i16( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg3.nxv8i16(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv8i16(,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) -define void @test_vssseg3_nxv8i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv8i16: +define void @test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv8i16( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg3_mask_nxv8i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv8i16: +define void 
@test_vssseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv8i16( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg4.nxv8i16(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv8i16(,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) -define void @test_vssseg4_nxv8i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv8i16: +define void @test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv8i16( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg4_mask_nxv8i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv8i16: +define void @test_vssseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv8i16( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg2.nxv8i8(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv8i8(,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) -define void @test_vssseg2_nxv8i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv8i8: +define void @test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv8i8( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg2_mask_nxv8i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv8i8: +define void @test_vssseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv8i8( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg3.nxv8i8(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv8i8(,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) -define void @test_vssseg3_nxv8i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv8i8: +define void @test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv8i8( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg3_mask_nxv8i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv8i8: +define void @test_vssseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv8i8( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg4.nxv8i8(,,,, ptr, i32, i32) -declare void 
@llvm.riscv.vssseg4.mask.nxv8i8(,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) -define void @test_vssseg4_nxv8i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv8i8: +define void @test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv8i8( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg4_mask_nxv8i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv8i8: +define void @test_vssseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv8i8( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg5.nxv8i8(,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg5.mask.nxv8i8(,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) -define void @test_vssseg5_nxv8i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv8i8: +define void @test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv8i8( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg5_mask_nxv8i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv8i8: +define void @test_vssseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) 
{ +; CHECK-LABEL: test_vssseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv8i8( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg6.nxv8i8(,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg6.mask.nxv8i8(,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) -define void @test_vssseg6_nxv8i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv8i8: +define void @test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg6_mask_nxv8i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv8i8: +define void @test_vssseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg7.nxv8i8(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv8i8(,,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) -define void @test_vssseg7_nxv8i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv8i8: +define void @test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) 
{ +; CHECK-LABEL: test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg7_mask_nxv8i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv8i8: +define void @test_vssseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg8.nxv8i8(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv8i8(,,,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) -define void @test_vssseg8_nxv8i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv8i8: +define void @test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg8_mask_nxv8i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv8i8: +define void @test_vssseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg2.nxv8i32(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv8i32(,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) -define void @test_vssseg2_nxv8i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv8i32: +define void @test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv8i32( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg2_mask_nxv8i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv8i32: +define void @test_vssseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv8i32( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg2.nxv4i8(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv4i8(,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) -define void @test_vssseg2_nxv4i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv4i8: +define void @test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; 
CHECK-NEXT: vssseg3e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv4i8( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg2_mask_nxv4i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv4i8: +define void @test_vssseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv4i8( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg3.nxv4i8(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv4i8(,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) -define void @test_vssseg3_nxv4i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv4i8: +define void @test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv4i8( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg3_mask_nxv4i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv4i8: +define void @test_vssseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv4i8( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg4.nxv4i8(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv4i8(,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, 
i32) -define void @test_vssseg4_nxv4i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv4i8: +define void @test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv4i8( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg4_mask_nxv4i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv4i8: +define void @test_vssseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv4i8( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg5.nxv4i8(,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg5.mask.nxv4i8(,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) -define void @test_vssseg5_nxv4i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv4i8: +define void @test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv4i8( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg5_mask_nxv4i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv4i8: +define void @test_vssseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv4i8( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg6.nxv4i8(,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg6.mask.nxv4i8(,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) -define void @test_vssseg6_nxv4i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv4i8: +define void @test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg6_mask_nxv4i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv4i8: +define void @test_vssseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg7.nxv4i8(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv4i8(,,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) -define void @test_vssseg7_nxv4i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv4i8: +define void @test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg7_mask_nxv4i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv4i8: +define void @test_vssseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg8.nxv4i8(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv4i8(,,,,,,,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) -define void @test_vssseg8_nxv4i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv4i8: +define void @test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg7e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg8_mask_nxv4i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv4i8: +define void @test_vssseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 
-; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg2.nxv1i16(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv1i16(,, ptr, i32, , i32) +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) -define void @test_vssseg2_nxv1i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv1i16: +define void @test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv1i16( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg2_mask_nxv1i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv1i16: +define void @test_vssseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv1i16( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg3.nxv1i16(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv1i16(,,, ptr, i32, , i32) -define void @test_vssseg3_nxv1i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv1i16: +define void @test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv1i16( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } 
-define void @test_vssseg3_mask_nxv1i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv1i16: +define void @test_vssseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv1i16( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg4.nxv1i16(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv1i16(,,,, ptr, i32, , i32) -define void @test_vssseg4_nxv1i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv1i16: +define void @test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv1i16( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg4_mask_nxv1i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv1i16: +define void @test_vssseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv1i16( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg5.nxv1i16(,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg5.mask.nxv1i16(,,,,, ptr, i32, , i32) -define void @test_vssseg5_nxv1i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv1i16: +define void @test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv1i16( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg5_mask_nxv1i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv1i16: +define void @test_vssseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv1i16( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg6.nxv1i16(,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg6.mask.nxv1i16(,,,,,, ptr, i32, , i32) -define void @test_vssseg6_nxv1i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv1i16: +define void @test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg6_mask_nxv1i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv1i16: +define void @test_vssseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void 
@llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg7.nxv1i16(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv1i16(,,,,,,, ptr, i32, , i32) -define void @test_vssseg7_nxv1i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv1i16: +define void @test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg7_mask_nxv1i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv1i16: +define void @test_vssseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg8.nxv1i16(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv1i16(,,,,,,,, ptr, i32, , i32) -define void @test_vssseg8_nxv1i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv1i16: +define void @test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, 
i32 4) ret void } -define void @test_vssseg8_mask_nxv1i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv1i16: +define void @test_vssseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg2.nxv32i8(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv32i8(,, ptr, i32, , i32) -define void @test_vssseg2_nxv32i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv32i8: +define void @test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv32i8( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg2_mask_nxv32i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv32i8: +define void @test_vssseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv32i8( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg2.nxv2i8(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv2i8(,, ptr, i32, , i32) -define void @test_vssseg2_nxv2i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv2i8: +define void @test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg2e8.v 
v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv2i8( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg2_mask_nxv2i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv2i8: +define void @test_vssseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv2i8( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg3.nxv2i8(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv2i8(,,, ptr, i32, , i32) -define void @test_vssseg3_nxv2i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv2i8: +define void @test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv2i8( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg3_mask_nxv2i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv2i8: +define void @test_vssseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv2i8( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg4.nxv2i8(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv2i8(,,,, ptr, i32, , i32) -define void @test_vssseg4_nxv2i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv2i8: +define void 
@test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv2i8( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg4_mask_nxv2i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv2i8: +define void @test_vssseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv2i8( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg5.nxv2i8(,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg5.mask.nxv2i8(,,,,, ptr, i32, , i32) -define void @test_vssseg5_nxv2i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv2i8: +define void @test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv2i8( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg5_mask_nxv2i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv2i8: +define void @test_vssseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret 
entry: - tail call void @llvm.riscv.vssseg5.mask.nxv2i8( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg6.nxv2i8(,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg6.mask.nxv2i8(,,,,,, ptr, i32, , i32) -define void @test_vssseg6_nxv2i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv2i8: +define void @test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg6_mask_nxv2i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv2i8: +define void @test_vssseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg7.nxv2i8(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv2i8(,,,,,,, ptr, i32, , i32) -define void @test_vssseg7_nxv2i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv2i8: +define void @test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void 
@llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg7_mask_nxv2i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv2i8: +define void @test_vssseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg8.nxv2i8(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv2i8(,,,,,,,, ptr, i32, , i32) -define void @test_vssseg8_nxv2i8( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv2i8: +define void @test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg8_mask_nxv2i8( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv2i8: +define void @test_vssseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void 
} -declare void @llvm.riscv.vssseg2.nxv2i16(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv2i16(,, ptr, i32, , i32) -define void @test_vssseg2_nxv2i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv2i16: +define void @test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv2i16( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg2_mask_nxv2i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv2i16: +define void @test_vssseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv2i16( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg3.nxv2i16(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv2i16(,,, ptr, i32, , i32) -define void @test_vssseg3_nxv2i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv2i16: +define void @test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv2i16( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg3_mask_nxv2i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv2i16: +define void @test_vssseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv2i16( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + 
tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg4.nxv2i16(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv2i16(,,,, ptr, i32, , i32) -define void @test_vssseg4_nxv2i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv2i16: +define void @test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv2i16( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg4_mask_nxv2i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv2i16: +define void @test_vssseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv2i16( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg5.nxv2i16(,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg5.mask.nxv2i16(,,,,, ptr, i32, , i32) -define void @test_vssseg5_nxv2i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv2i16: +define void @test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv2i16( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg5_mask_nxv2i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv2i16: +define void @test_vssseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vssseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv2i16( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg6.nxv2i16(,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg6.mask.nxv2i16(,,,,,, ptr, i32, , i32) -define void @test_vssseg6_nxv2i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv2i16: +define void @test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg6_mask_nxv2i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv2i16: +define void @test_vssseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg7.nxv2i16(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv2i16(,,,,,,, ptr, i32, , i32) -define void @test_vssseg7_nxv2i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv2i16: +define void @test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: 
vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg7_mask_nxv2i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv2i16: +define void @test_vssseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg8.nxv2i16(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv2i16(,,,,,,,, ptr, i32, , i32) -define void @test_vssseg8_nxv2i16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv2i16: +define void @test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg8_mask_nxv2i16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv2i16: +define void @test_vssseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, 
ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg2.nxv4i32(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv4i32(,, ptr, i32, , i32) -define void @test_vssseg2_nxv4i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv4i32: +define void @test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv4i32( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg2_mask_nxv4i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv4i32: +define void @test_vssseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv4i32( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg3.nxv4i32(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv4i32(,,, ptr, i32, , i32) -define void @test_vssseg3_nxv4i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv4i32: +define void @test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv4i32( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg3_mask_nxv4i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv4i32: +define void @test_vssseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg8e16.v 
v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv4i32( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg4.nxv4i32(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv4i32(,,,, ptr, i32, , i32) -define void @test_vssseg4_nxv4i32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv4i32: +define void @test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv4i32( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg4_mask_nxv4i32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv4i32: +define void @test_vssseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv4i32( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg2.nxv16f16(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv16f16(,, ptr, i32, , i32) -define void @test_vssseg2_nxv16f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv16f16: +define void @test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv16f16( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg2_mask_nxv16f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv16f16: +define void 
@test_vssseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv16f16( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg2.nxv4f64(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv4f64(,, ptr, i32, , i32) -define void @test_vssseg2_nxv4f64( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv4f64: +define void @test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv4f64( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg2_mask_nxv4f64( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv4f64: +define void @test_vssseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv4f64( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg2.nxv1f64(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv1f64(,, ptr, i32, , i32) -define void @test_vssseg2_nxv1f64( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv1f64: +define void @test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv1f64( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, 
ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg2_mask_nxv1f64( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv1f64: +define void @test_vssseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv1f64( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg3.nxv1f64(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv1f64(,,, ptr, i32, , i32) -define void @test_vssseg3_nxv1f64( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv1f64: +define void @test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv1f64( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg3_mask_nxv1f64( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv1f64: +define void @test_vssseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv1f64( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg4.nxv1f64(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv1f64(,,,, ptr, i32, , i32) -define void @test_vssseg4_nxv1f64( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv1f64: +define void @test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: 
vssseg4e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv1f64( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg4_mask_nxv1f64( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv1f64: +define void @test_vssseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv1f64( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg5.nxv1f64(,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg5.mask.nxv1f64(,,,,, ptr, i32, , i32) -define void @test_vssseg5_nxv1f64( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv1f64: +define void @test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv1f64( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg5_mask_nxv1f64( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv1f64: +define void @test_vssseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv1f64( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg6.nxv1f64(,,,,,, ptr, i32, 
i32) -declare void @llvm.riscv.vssseg6.mask.nxv1f64(,,,,,, ptr, i32, , i32) -define void @test_vssseg6_nxv1f64( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv1f64: +define void @test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv1f64( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg6_mask_nxv1f64( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv1f64: +define void @test_vssseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv1f64( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg7.nxv1f64(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv1f64(,,,,,,, ptr, i32, , i32) -define void @test_vssseg7_nxv1f64( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv1f64: +define void @test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg7e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv1f64( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg7_mask_nxv1f64( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv1f64: +define void @test_vssseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, 
%mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg8.nxv1f64(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv1f64(,,,,,,,, ptr, i32, , i32) -define void @test_vssseg8_nxv1f64( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv1f64: +define void @test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg8_mask_nxv1f64( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv1f64: +define void @test_vssseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg2.nxv2f32(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv2f32(,, ptr, i32, , i32) -define void @test_vssseg2_nxv2f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv2f32: +define void @test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: 
test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv2f32( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg2_mask_nxv2f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv2f32: +define void @test_vssseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv2f32( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg3.nxv2f32(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv2f32(,,, ptr, i32, , i32) -define void @test_vssseg3_nxv2f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv2f32: +define void @test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv2f32( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg3_mask_nxv2f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv2f32: +define void @test_vssseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv2f32( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg4.nxv2f32(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv2f32(,,,, ptr, i32, , i32) -define void @test_vssseg4_nxv2f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: 
test_vssseg4_nxv2f32: +define void @test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv2f32( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg4_mask_nxv2f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv2f32: +define void @test_vssseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv2f32( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg5.nxv2f32(,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg5.mask.nxv2f32(,,,,, ptr, i32, , i32) -define void @test_vssseg5_nxv2f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv2f32: +define void @test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv2f32( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg5_mask_nxv2f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv2f32: +define void @test_vssseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv2f32( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void 
@llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 5) + ret void +} + +define void @test_vssseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg6.nxv2f32(,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg6.mask.nxv2f32(,,,,,, ptr, i32, , i32) -define void @test_vssseg6_nxv2f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv2f32: +define void @test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv2f32( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg6_mask_nxv2f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv2f32: +define void @test_vssseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv2f32( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg7.nxv2f32(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv2f32(,,,,,,, ptr, i32, , i32) -define void @test_vssseg7_nxv2f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv2f32: +define void 
@test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv2f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg7_mask_nxv2f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv2f32: +define void @test_vssseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg8.nxv2f32(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv2f32(,,,,,,,, ptr, i32, , i32) -define void @test_vssseg8_nxv2f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv2f32: +define void @test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg8_mask_nxv2f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv2f32: +define void @test_vssseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg2.nxv1f16(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv1f16(,, ptr, i32, , i32) -define void @test_vssseg2_nxv1f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv1f16: +define void @test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv1f16( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg2_mask_nxv1f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv1f16: +define void @test_vssseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv1f16( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg3.nxv1f16(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv1f16(,,, ptr, i32, , i32) -define void @test_vssseg3_nxv1f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv1f16: +define void @test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv1f16( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 5) ret void } -define void @test_vssseg3_mask_nxv1f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vssseg3_mask_nxv1f16: +define void @test_vssseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv1f16( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vssseg4.nxv1f16(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv1f16(,,,, ptr, i32, , i32) -define void @test_vssseg4_nxv1f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv1f16: +define void @test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv1f16( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg4_mask_nxv1f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +define void @test_vssseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv1f16( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg5.nxv1f16(,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg5.mask.nxv1f16(,,,,, ptr, i32, , i32) -define void @test_vssseg5_nxv1f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv1f16: +define void @test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, 
ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv1f16( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg5_mask_nxv1f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv1f16: +define void @test_vssseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv1f16( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg6.nxv1f16(,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg6.mask.nxv1f16(,,,,,, ptr, i32, , i32) -define void @test_vssseg6_nxv1f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv1f16: +define void @test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv1f16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg6_mask_nxv1f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv1f16: +define void @test_vssseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv1f16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void 
@llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg7.nxv1f16(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv1f16(,,,,,,, ptr, i32, , i32) -define void @test_vssseg7_nxv1f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv1f16: +define void @test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv1f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg7_mask_nxv1f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv1f16: +define void @test_vssseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg8.nxv1f16(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv1f16(,,,,,,,, ptr, i32, , i32) -define void @test_vssseg8_nxv1f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv1f16: +define void @test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 
3) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg8_mask_nxv1f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv1f16: +define void @test_vssseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg2.nxv1f32(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv1f32(,, ptr, i32, , i32) -define void @test_vssseg2_nxv1f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv1f32: +define void @test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv1f32( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg2_mask_nxv1f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv1f32: +define void @test_vssseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv1f32( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg3.nxv1f32(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv1f32(,,, ptr, i32, , i32) -define void @test_vssseg3_nxv1f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv1f32: +define void @test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv1f32( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg3_mask_nxv1f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv1f32: +define void @test_vssseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv1f32( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg4.nxv1f32(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv1f32(,,,, ptr, i32, , i32) -define void @test_vssseg4_nxv1f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv1f32: +define void @test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv1f32( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg4_mask_nxv1f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv1f32: +define void @test_vssseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv1f32( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg5.nxv1f32(,,,,, ptr, 
i32, i32) -declare void @llvm.riscv.vssseg5.mask.nxv1f32(,,,,, ptr, i32, , i32) -define void @test_vssseg5_nxv1f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv1f32: +define void @test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv1f32( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg5_mask_nxv1f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv1f32: +define void @test_vssseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv1f32( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg6.nxv1f32(,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg6.mask.nxv1f32(,,,,,, ptr, i32, , i32) -define void @test_vssseg6_nxv1f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv1f32: +define void @test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg7e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv1f32( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg6_mask_nxv1f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv1f32: +define void @test_vssseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv1f32( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg7.nxv1f32(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv1f32(,,,,,,, ptr, i32, , i32) -define void @test_vssseg7_nxv1f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv1f32: +define void @test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv1f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 6) ret void } -define void @test_vssseg7_mask_nxv1f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv1f32: +define void @test_vssseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vssseg8.nxv1f32(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv1f32(,,,,,,,, ptr, i32, , i32) -define void @test_vssseg8_nxv1f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv1f32: +define void @test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg8_mask_nxv1f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv1f32: +define void @test_vssseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg2.nxv8f16(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv8f16(,, ptr, i32, , i32) -define void @test_vssseg2_nxv8f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv8f16: +define void @test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv8f16( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg2_mask_nxv8f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv8f16: +define void @test_vssseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv8f16( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg3.nxv8f16(,,, 
ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv8f16(,,, ptr, i32, , i32) -define void @test_vssseg3_nxv8f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv8f16: +define void @test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv8f16( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg3_mask_nxv8f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv8f16: +define void @test_vssseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv8f16( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg4.nxv8f16(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv8f16(,,,, ptr, i32, , i32) -define void @test_vssseg4_nxv8f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv8f16: +define void @test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv8f16( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg4_mask_nxv8f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv8f16: +define void @test_vssseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - 
tail call void @llvm.riscv.vssseg4.mask.nxv8f16( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg2.nxv8f32(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv8f32(,, ptr, i32, , i32) -define void @test_vssseg2_nxv8f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv8f32: +define void @test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv8f32( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg2_mask_nxv8f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv8f32: +define void @test_vssseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv8f32( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg2.nxv2f64(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv2f64(,, ptr, i32, , i32) -define void @test_vssseg2_nxv2f64( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv2f64: +define void @test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv2f64( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg2_mask_nxv2f64( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv2f64: +define void @test_vssseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v 
v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv2f64( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg3.nxv2f64(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv2f64(,,, ptr, i32, , i32) -define void @test_vssseg3_nxv2f64( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv2f64: +define void @test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv2f64( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg3_mask_nxv2f64( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv2f64: +define void @test_vssseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv2f64( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg4.nxv2f64(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv2f64(,,,, ptr, i32, , i32) -define void @test_vssseg4_nxv2f64( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv2f64: +define void @test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv2f64( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg4_mask_nxv2f64( %val, ptr 
%base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv2f64: +define void @test_vssseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv2f64( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg2.nxv4f16(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv4f16(,, ptr, i32, , i32) -define void @test_vssseg2_nxv4f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv4f16: +define void @test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv4f16( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg2_mask_nxv4f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv4f16: +define void @test_vssseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv4f16( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg3.nxv4f16(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv4f16(,,, ptr, i32, , i32) -define void @test_vssseg3_nxv4f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv4f16: +define void @test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv4f16( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg3_mask_nxv4f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv4f16: +define void @test_vssseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv4f16( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg4.nxv4f16(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv4f16(,,,, ptr, i32, , i32) -define void @test_vssseg4_nxv4f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv4f16: +define void @test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv4f16( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg4_mask_nxv4f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv4f16: +define void @test_vssseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv4f16( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg5.nxv4f16(,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg5.mask.nxv4f16(,,,,, ptr, i32, , i32) -define void @test_vssseg5_nxv4f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv4f16: +define void @test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 
%vl) { +; CHECK-LABEL: test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv4f16( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg5_mask_nxv4f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv4f16: +define void @test_vssseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv4f16( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg6.nxv4f16(,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg6.mask.nxv4f16(,,,,,, ptr, i32, , i32) -define void @test_vssseg6_nxv4f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv4f16: +define void @test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv4f16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg6_mask_nxv4f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv4f16: +define void @test_vssseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv4f16( 
%val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg7.nxv4f16(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv4f16(,,,,,,, ptr, i32, , i32) -define void @test_vssseg7_nxv4f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv4f16: +define void @test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv4f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg7_mask_nxv4f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv4f16: +define void @test_vssseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg8.nxv4f16(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv4f16(,,,,,,,, ptr, i32, , i32) -define void @test_vssseg8_nxv4f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv4f16: +define void @test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + 
tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg8_mask_nxv4f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv4f16: +define void @test_vssseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg2.nxv2f16(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv2f16(,, ptr, i32, , i32) -define void @test_vssseg2_nxv2f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv2f16: +define void @test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv2f16( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg2_mask_nxv2f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv2f16: +define void @test_vssseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv2f16( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg3.nxv2f16(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv2f16(,,, ptr, i32, , i32) -define void @test_vssseg3_nxv2f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv2f16: +define void @test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { 
+; CHECK-LABEL: test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv2f16( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg3_mask_nxv2f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv2f16: +define void @test_vssseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv2f16( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg4.nxv2f16(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv2f16(,,,, ptr, i32, , i32) -define void @test_vssseg4_nxv2f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv2f16: +define void @test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv2f16( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg4_mask_nxv2f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv2f16: +define void @test_vssseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv2f16( %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg5.nxv2f16(,,,,, ptr, 
i32, i32) -declare void @llvm.riscv.vssseg5.mask.nxv2f16(,,,,, ptr, i32, , i32) -define void @test_vssseg5_nxv2f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg5_nxv2f16: +define void @test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv2f16( %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg5_mask_nxv2f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv2f16: +define void @test_vssseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv2f16( %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg6.nxv2f16(,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg6.mask.nxv2f16(,,,,,, ptr, i32, , i32) -define void @test_vssseg6_nxv2f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg6_nxv2f16: +define void @test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv2f16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg6_mask_nxv2f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv2f16: +define void @test_vssseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv2f16( %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg7.nxv2f16(,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg7.mask.nxv2f16(,,,,,,, ptr, i32, , i32) -define void @test_vssseg7_nxv2f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg7_nxv2f16: +define void @test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv2f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg7_mask_nxv2f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv2f16: +define void @test_vssseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg8.nxv2f16(,,,,,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg8.mask.nxv2f16(,,,,,,,, ptr, i32, , i32) -define void @test_vssseg8_nxv2f16( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg8_nxv2f16: +define void @test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; 
CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg8_mask_nxv2f16( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv2f16: +define void @test_vssseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg2.nxv4f32(,, ptr, i32, i32) -declare void @llvm.riscv.vssseg2.mask.nxv4f32(,, ptr, i32, , i32) -define void @test_vssseg2_nxv4f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg2_nxv4f32: +define void @test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv4f32( %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg2_mask_nxv4f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv4f32: +define void @test_vssseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv4f32( %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg3.nxv4f32(,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg3.mask.nxv4f32(,,, ptr, i32, , i32) -define void 
@test_vssseg3_nxv4f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg3_nxv4f32: +define void @test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv4f32( %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg3_mask_nxv4f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv4f32: +define void @test_vssseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv4f32( %val, %val, %val, ptr %base, i32 %offset, %mask, i32 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vssseg4.nxv4f32(,,,, ptr, i32, i32) -declare void @llvm.riscv.vssseg4.mask.nxv4f32(,,,, ptr, i32, , i32) -define void @test_vssseg4_nxv4f32( %val, ptr %base, i32 %offset, i32 %vl) { -; CHECK-LABEL: test_vssseg4_nxv4f32: +define void @test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { +; CHECK-LABEL: test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv4f32( %val, %val, %val, %val, ptr %base, i32 %offset, i32 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) ret void } -define void @test_vssseg4_mask_nxv4f32( %val, ptr %base, i32 %offset, %mask, i32 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv4f32: +define void @test_vssseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - 
tail call void @llvm.riscv.vssseg4.mask.nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
   ret void
 }
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll
index 2ef2173696771..dd6faad09f49a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll
@@ -2,4496 +2,3916 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
-declare void @llvm.riscv.vssseg2.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, ptr, i64, i64)
-declare void @llvm.riscv.vssseg2.mask.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, ptr, i64, <vscale x 16 x i1>, i64)
+declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, i64, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, i64, <vscale x 1 x i1>, i64, i64)
-define void @test_vssseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
-; CHECK-LABEL: test_vssseg2_nxv16i16:
+define void @test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
+; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.vssseg2.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, ptr %base, i64 %offset, i64 %vl)
+  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
   ret void
 }
-define void @test_vssseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vssseg2_mask_nxv16i16:
+define void @test_vssseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.vssseg2.mask.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
+  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
   ret void
 }
-declare void @llvm.riscv.vssseg2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, ptr, i64, i64)
-declare void @llvm.riscv.vssseg2.mask.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, ptr, i64, <vscale x 4 x i1>, i64)
+declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, i64, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, i64, <vscale x 2 x i1>, i64, i64)
-define void @test_vssseg2_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
-; CHECK-LABEL: test_vssseg2_nxv4i32:
+define void @test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v10, v8
-;
CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv4i32( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg2_mask_nxv4i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv4i32: +define void @test_vssseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv4i32( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg3.nxv4i32(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv4i32(,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) -define void @test_vssseg3_nxv4i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv4i32: +define void @test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv4i32( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg3_mask_nxv4i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv4i32: +define void @test_vssseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv4i32( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } 
-declare void @llvm.riscv.vssseg4.nxv4i32(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv4i32(,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) -define void @test_vssseg4_nxv4i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv4i32: +define void @test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv4i32( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg4_mask_nxv4i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv4i32: +define void @test_vssseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv4i32( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg2.nxv16i8(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv16i8(,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) -define void @test_vssseg2_nxv16i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv16i8: +define void @test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv16i8( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg2_mask_nxv16i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv16i8: +define void 
@test_vssseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv16i8( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg3.nxv16i8(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv16i8(,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) -define void @test_vssseg3_nxv16i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv16i8: +define void @test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 3) + ret void +} + +define void @test_vssseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) + +define void @test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv16i8( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg3_mask_nxv16i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv16i8: +define void @test_vssseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vssseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv16i8( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg4.nxv16i8(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv16i8(,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) -define void @test_vssseg4_nxv16i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv16i8: +define void @test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv16i8( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg4_mask_nxv16i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv16i8: +define void @test_vssseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv16i8( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg2.nxv1i64(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv1i64(,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) -define void @test_vssseg2_nxv1i64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv1i64: +define void @test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: 
test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv1i64( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg2_mask_nxv1i64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv1i64: +define void @test_vssseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv1i64( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg3.nxv1i64(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv1i64(,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) -define void @test_vssseg3_nxv1i64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv1i64: +define void @test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv1i64( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg3_mask_nxv1i64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv1i64: +define void @test_vssseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv1i64( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void 
@llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg4.nxv1i64(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv1i64(,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) -define void @test_vssseg4_nxv1i64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv1i64: +define void @test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv1i64( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg4_mask_nxv1i64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv1i64: +define void @test_vssseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv1i64( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg5.nxv1i64(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv1i64(,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) -define void @test_vssseg5_nxv1i64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv1i64: +define void @test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv1i64( %val, 
%val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg5_mask_nxv1i64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv1i64: +define void @test_vssseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv1i64( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg6.nxv1i64(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv1i64(,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) -define void @test_vssseg6_nxv1i64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv1i64: +define void @test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg6_mask_nxv1i64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv1i64: +define void @test_vssseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void 
@llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg7.nxv1i64(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv1i64(,,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) -define void @test_vssseg7_nxv1i64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv1i64: +define void @test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg7e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg7_mask_nxv1i64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv1i64: +define void @test_vssseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg8.nxv1i64(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv1i64(,,,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) -define void @test_vssseg8_nxv1i64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv1i64: +define void @test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; 
CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg8_mask_nxv1i64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv1i64: +define void @test_vssseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg2.nxv1i32(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv1i32(,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) -define void @test_vssseg2_nxv1i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv1i32: +define void @test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv1i32( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg2_mask_nxv1i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv1i32: +define void @test_vssseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vssseg2.mask.nxv1i32( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg3.nxv1i32(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv1i32(,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) -define void @test_vssseg3_nxv1i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv1i32: +define void @test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv1i32( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg3_mask_nxv1i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv1i32: +define void @test_vssseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv1i32( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg4.nxv1i32(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv1i32(,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) -define void @test_vssseg4_nxv1i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv1i32: +define void @test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv1i32( %val, %val, %val, 
%val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg4_mask_nxv1i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv1i32: +define void @test_vssseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv1i32( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg5.nxv1i32(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv1i32(,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) -define void @test_vssseg5_nxv1i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv1i32: +define void @test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv1i32( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg5_mask_nxv1i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv1i32: +define void @test_vssseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv1i32( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void 
@llvm.riscv.vssseg6.nxv1i32(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv1i32(,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) -define void @test_vssseg6_nxv1i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv1i32: +define void @test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg6_mask_nxv1i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv1i32: +define void @test_vssseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg7.nxv1i32(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv1i32(,,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) -define void @test_vssseg7_nxv1i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv1i32: +define void @test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vssseg7.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg7_mask_nxv1i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv1i32: +define void @test_vssseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg8.nxv1i32(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv1i32(,,,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) -define void @test_vssseg8_nxv1i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv1i32: +define void @test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg8_mask_nxv1i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv1i32: +define void @test_vssseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; 
CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg2.nxv8i16(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv8i16(,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) -define void @test_vssseg2_nxv8i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv8i16: +define void @test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv8i16( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg2_mask_nxv8i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv8i16: +define void @test_vssseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv8i16( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg3.nxv8i16(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv8i16(,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) -define void @test_vssseg3_nxv8i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv8i16: +define void @test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv8i16( %val, 
%val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg3_mask_nxv8i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv8i16: +define void @test_vssseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv8i16( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg4.nxv8i16(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv8i16(,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) -define void @test_vssseg4_nxv8i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv8i16: +define void @test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv8i16( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg4_mask_nxv8i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv8i16: +define void @test_vssseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv8i16( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg2.nxv4i8(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv4i8(,, ptr, i64, , i64) +declare 
void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) -define void @test_vssseg2_nxv4i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv4i8: +define void @test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv4i8( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg2_mask_nxv4i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv4i8: +define void @test_vssseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv4i8( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg3.nxv4i8(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv4i8(,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) -define void @test_vssseg3_nxv4i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv4i8: +define void @test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv4i8( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg3_mask_nxv4i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv4i8: +define void @test_vssseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv4i8( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg4.nxv4i8(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv4i8(,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) -define void @test_vssseg4_nxv4i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv4i8: +define void @test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv4i8( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg4_mask_nxv4i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv4i8: +define void @test_vssseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv4i8( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg5.nxv4i8(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv4i8(,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) -define void @test_vssseg5_nxv4i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv4i8: +define void @test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, 
v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv4i8( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg5_mask_nxv4i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv4i8: +define void @test_vssseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv4i8( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg6.nxv4i8(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv4i8(,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) -define void @test_vssseg6_nxv4i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv4i8: +define void @test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg6_mask_nxv4i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv4i8: +define void @test_vssseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: 
vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg7.nxv4i8(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv4i8(,,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) -define void @test_vssseg7_nxv4i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv4i8: +define void @test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg7_mask_nxv4i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv4i8: +define void @test_vssseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg8.nxv4i8(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv4i8(,,,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) -define void @test_vssseg8_nxv4i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv4i8: +define void @test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 3) ret void } -define void @test_vssseg8_mask_nxv4i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv4i8: +define void @test_vssseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vssseg2.nxv1i16(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv1i16(,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) -define void @test_vssseg2_nxv1i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv1i16: +define void @test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv1i16( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg2_mask_nxv1i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv1i16: +define void @test_vssseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv1i16( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void 
@llvm.riscv.vssseg3.nxv1i16(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv1i16(,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) -define void @test_vssseg3_nxv1i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv1i16: +define void @test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv1i16( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg3_mask_nxv1i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv1i16: +define void @test_vssseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv1i16( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg4.nxv1i16(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv1i16(,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) -define void @test_vssseg4_nxv1i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv1i16: +define void @test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv1i16( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg4_mask_nxv1i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv1i16: +define void @test_vssseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vssseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) + +define void @test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) + +define void @test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) + +define void @test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, i64, <vscale x 2 x i1>, i64, i64)
+
+define void @test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, i64, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vssseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, i64, <vscale x 8 x i1>, i64, i64)
+
+define void @test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
+; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vssseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
+; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, i64, <vscale x 1 x i1>, i64, i64)
+
+define void @test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT: vssseg4e16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, i64, <vscale x 2 x i1>, i64, i64)
+
+define void @test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT: vssseg4e16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, i64, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t:
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) + +define void @test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) + +define void @test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) + +define void @test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 
%offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) + +define void @test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) + +define void @test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) + +define void 
@test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) + +define void @test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) + +define void @test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void 
@llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) + +define void @test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) + +define void @test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) + +define void @test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, 
ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) + +define void @test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) + +define void @test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4) + ret void +} + +define void @test_vssseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) + +define void @test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5) + ret void +} + +define void @test_vssseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vssseg4.mask.nxv1i16( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg5.nxv1i16(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv1i16(,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) -define void @test_vssseg5_nxv1i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv1i16: +define void @test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv1i16( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg5_mask_nxv1i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv1i16: +define void @test_vssseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv1i16( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg6.nxv1i16(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv1i16(,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) -define void @test_vssseg6_nxv1i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv1i16: +define void @test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv1i16( %val, %val, %val, 
%val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg6_mask_nxv1i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv1i16: +define void @test_vssseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg7.nxv1i16(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv1i16(,,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) -define void @test_vssseg7_nxv1i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv1i16: +define void @test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg7_mask_nxv1i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv1i16: +define void @test_vssseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void 
@llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg8.nxv1i16(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv1i16(,,,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) -define void @test_vssseg8_nxv1i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv1i16: +define void @test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg8_mask_nxv1i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv1i16: +define void @test_vssseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg2.nxv2i32(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv2i32(,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) -define void @test_vssseg2_nxv2i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv2i32: +define void @test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv2i32( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void 
@llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg2_mask_nxv2i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv2i32: +define void @test_vssseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv2i32( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg3.nxv2i32(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv2i32(,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) -define void @test_vssseg3_nxv2i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv2i32: +define void @test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv2i32( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg3_mask_nxv2i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv2i32: +define void @test_vssseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv2i32( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg4.nxv2i32(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv2i32(,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) -define void @test_vssseg4_nxv2i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv2i32: +define void @test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: 
test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv2i32( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg4_mask_nxv2i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv2i32: +define void @test_vssseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv2i32( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg5.nxv2i32(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv2i32(,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) -define void @test_vssseg5_nxv2i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv2i32: +define void @test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv2i32( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg5_mask_nxv2i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv2i32: +define void @test_vssseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv2i32( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void 
@llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg6.nxv2i32(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv2i32(,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) -define void @test_vssseg6_nxv2i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv2i32: +define void @test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg6_mask_nxv2i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv2i32: +define void @test_vssseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg7.nxv2i32(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv2i32(,,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) -define void @test_vssseg7_nxv2i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv2i32: +define void @test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv2i32( %val, %val, %val, 
%val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg7_mask_nxv2i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv2i32: +define void @test_vssseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg8.nxv2i32(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv2i32(,,,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) -define void @test_vssseg8_nxv2i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv2i32: +define void @test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg8_mask_nxv2i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv2i32: +define void @test_vssseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void 
@llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg2.nxv8i8(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv8i8(,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) -define void @test_vssseg2_nxv8i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv8i8: +define void @test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv8i8( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg2_mask_nxv8i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv8i8: +define void @test_vssseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv8i8( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg3.nxv8i8(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv8i8(,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) -define void @test_vssseg3_nxv8i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv8i8: +define void @test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv8i8( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg3_mask_nxv8i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv8i8: +define void @test_vssseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, 
%mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv8i8( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg4.nxv8i8(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv8i8(,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) -define void @test_vssseg4_nxv8i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv8i8: +define void @test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv8i8( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg4_mask_nxv8i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv8i8: +define void @test_vssseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv8i8( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg5.nxv8i8(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv8i8(,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) -define void @test_vssseg5_nxv8i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv8i8: +define void @test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv8i8( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg5_mask_nxv8i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv8i8: +define void @test_vssseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv8i8( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg6.nxv8i8(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv8i8(,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) -define void @test_vssseg6_nxv8i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv8i8: +define void @test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg6_mask_nxv8i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv8i8: +define void @test_vssseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv8i8( %val, %val, %val, %val, 
%val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg7.nxv8i8(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv8i8(,,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) -define void @test_vssseg7_nxv8i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv8i8: +define void @test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg7_mask_nxv8i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv8i8: +define void @test_vssseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg8.nxv8i8(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv8i8(,,,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) -define void @test_vssseg8_nxv8i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv8i8: +define void @test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, 
a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg8_mask_nxv8i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv8i8: +define void @test_vssseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma -; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg2.nxv4i64(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv4i64(,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) -define void @test_vssseg2_nxv4i64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv4i64: +define void @test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv4i64( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg2_mask_nxv4i64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv4i64: +define void @test_vssseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv4i64( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg2.nxv4i16(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv4i16(,, ptr, i64, , i64) 
+declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) -define void @test_vssseg2_nxv4i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv4i16: +define void @test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv4i16( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg2_mask_nxv4i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv4i16: +define void @test_vssseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv4i16( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg3.nxv4i16(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv4i16(,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) -define void @test_vssseg3_nxv4i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv4i16: +define void @test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv4i16( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg3_mask_nxv4i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv4i16: +define void @test_vssseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, 
v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv4i16( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg4.nxv4i16(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv4i16(,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) -define void @test_vssseg4_nxv4i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv4i16: +define void @test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv4i16( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg4_mask_nxv4i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv4i16: +define void @test_vssseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv4i16( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg5.nxv4i16(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv4i16(,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) -define void @test_vssseg5_nxv4i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv4i16: +define void @test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vssseg5.nxv4i16( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg5_mask_nxv4i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv4i16: +define void @test_vssseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv4i16( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg6.nxv4i16(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv4i16(,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) -define void @test_vssseg6_nxv4i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv4i16: +define void @test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg6_mask_nxv4i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv4i16: +define void @test_vssseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, 
%mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg7.nxv4i16(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv4i16(,,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) -define void @test_vssseg7_nxv4i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv4i16: +define void @test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg7_mask_nxv4i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv4i16: +define void @test_vssseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg8.nxv4i16(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv4i16(,,,,,,,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) -define void @test_vssseg8_nxv4i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv4i16: +define void @test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, 
%val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg8_mask_nxv4i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv4i16: +define void @test_vssseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg2.nxv1i8(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv1i8(,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) -define void @test_vssseg2_nxv1i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv1i8: +define void @test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg7e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv1i8( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg2_mask_nxv1i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv1i8: +define void @test_vssseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv1i8( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg3.nxv1i8(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv1i8(,,, ptr, i64, , i64) +declare void 
@llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) -define void @test_vssseg3_nxv1i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv1i8: +define void @test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv1i8( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg3_mask_nxv1i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv1i8: +define void @test_vssseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv1i8( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg4.nxv1i8(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv1i8(,,,, ptr, i64, , i64) -define void @test_vssseg4_nxv1i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv1i8: +define void @test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv1i8( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg4_mask_nxv1i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv1i8: +define void @test_vssseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: 
vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv1i8( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg5.nxv1i8(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv1i8(,,,,, ptr, i64, , i64) -define void @test_vssseg5_nxv1i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv1i8: +define void @test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv1i8( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg5_mask_nxv1i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv1i8: +define void @test_vssseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv1i8( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg6.nxv1i8(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv1i8(,,,,,, ptr, i64, , i64) -define void @test_vssseg6_nxv1i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv1i8: +define void @test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr 
%base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg6_mask_nxv1i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv1i8: +define void @test_vssseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg7.nxv1i8(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv1i8(,,,,,,, ptr, i64, , i64) -define void @test_vssseg7_nxv1i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv1i8: +define void @test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg7_mask_nxv1i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv1i8: +define void @test_vssseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg8.nxv1i8(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv1i8(,,,,,,,, ptr, i64, , i64) -define void @test_vssseg8_nxv1i8( %val, ptr %base, i64 %offset, 
i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv1i8: +define void @test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg8_mask_nxv1i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv1i8: +define void @test_vssseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma -; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg2.nxv2i8(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv2i8(,, ptr, i64, , i64) -define void @test_vssseg2_nxv2i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv2i8: +define void @test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv2i8( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg2_mask_nxv2i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv2i8: +define void @test_vssseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; 
CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv2i8( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg3.nxv2i8(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv2i8(,,, ptr, i64, , i64) -define void @test_vssseg3_nxv2i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv2i8: +define void @test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv2i8( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg3_mask_nxv2i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv2i8: +define void @test_vssseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv2i8( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg4.nxv2i8(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv2i8(,,,, ptr, i64, , i64) -define void @test_vssseg4_nxv2i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv2i8: +define void @test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv2i8( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg4_mask_nxv2i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv2i8: +define 
void @test_vssseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv2i8( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg5.nxv2i8(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv2i8(,,,,, ptr, i64, , i64) -define void @test_vssseg5_nxv2i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv2i8: +define void @test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv2i8( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg5_mask_nxv2i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv2i8: +define void @test_vssseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv2i8( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg6.nxv2i8(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv2i8(,,,,,, ptr, i64, , i64) -define void @test_vssseg6_nxv2i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv2i8: +define void @test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg6_mask_nxv2i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv2i8: +define void @test_vssseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg7.nxv2i8(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv2i8(,,,,,,, ptr, i64, , i64) -define void @test_vssseg7_nxv2i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv2i8: +define void @test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg7_mask_nxv2i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv2i8: +define void @test_vssseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv2i8( %val, 
%val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg8.nxv2i8(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv2i8(,,,,,,,, ptr, i64, , i64) -define void @test_vssseg8_nxv2i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv2i8: +define void @test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg8_mask_nxv2i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv2i8: +define void @test_vssseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma -; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg2.nxv8i32(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv8i32(,, ptr, i64, , i64) -define void @test_vssseg2_nxv8i32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv8i32: +define void @test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv8i32( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void 
@test_vssseg2_mask_nxv8i32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv8i32: +define void @test_vssseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv8i32( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg2.nxv32i8(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv32i8(,, ptr, i64, , i64) -define void @test_vssseg2_nxv32i8( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv32i8: +define void @test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv32i8( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg2_mask_nxv32i8( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv32i8: +define void @test_vssseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma -; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv32i8( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg2.nxv2i16(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv2i16(,, ptr, i64, , i64) -define void @test_vssseg2_nxv2i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv2i16: +define void @test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv2i16( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void 
@llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg2_mask_nxv2i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv2i16: +define void @test_vssseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv2i16( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg3.nxv2i16(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv2i16(,,, ptr, i64, , i64) -define void @test_vssseg3_nxv2i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv2i16: +define void @test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv2i16( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg3_mask_nxv2i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv2i16: +define void @test_vssseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv2i16( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg4.nxv2i16(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv2i16(,,,, ptr, i64, , i64) -define void @test_vssseg4_nxv2i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv2i16: +define void @test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, 
a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv2i16( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg4_mask_nxv2i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv2i16: +define void @test_vssseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv2i16( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg5.nxv2i16(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv2i16(,,,,, ptr, i64, , i64) -define void @test_vssseg5_nxv2i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv2i16: +define void @test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv2i16( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg5_mask_nxv2i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv2i16: +define void @test_vssseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv2i16( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg6.nxv2i16(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv2i16(,,,,,, ptr, 
i64, , i64) -define void @test_vssseg6_nxv2i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv2i16: +define void @test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg6_mask_nxv2i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv2i16: +define void @test_vssseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg7.nxv2i16(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv2i16(,,,,,,, ptr, i64, , i64) -define void @test_vssseg7_nxv2i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv2i16: +define void @test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg7_mask_nxv2i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv2i16: +define void @test_vssseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, 
v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg8.nxv2i16(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv2i16(,,,,,,,, ptr, i64, , i64) -define void @test_vssseg8_nxv2i16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv2i16: +define void @test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg8_mask_nxv2i16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv2i16: +define void @test_vssseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg2.nxv2i64(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv2i64(,, ptr, i64, , i64) -define void @test_vssseg2_nxv2i64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv2i64: +define void @test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vssseg2.nxv2i64( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg2_mask_nxv2i64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv2i64: +define void @test_vssseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv2i64( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg3.nxv2i64(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv2i64(,,, ptr, i64, , i64) -define void @test_vssseg3_nxv2i64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv2i64: +define void @test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv2i64( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg3_mask_nxv2i64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv2i64: +define void @test_vssseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv2i64( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg4.nxv2i64(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv2i64(,,,, ptr, i64, , i64) -define void @test_vssseg4_nxv2i64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv2i64: +define void @test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv2i64( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg4_mask_nxv2i64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv2i64: +define void @test_vssseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv2i64( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg2.nxv16f16(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv16f16(,, ptr, i64, , i64) -define void @test_vssseg2_nxv16f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv16f16: +define void @test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv16f16( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg2_mask_nxv16f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv16f16: +define void @test_vssseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv16f16( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg2.nxv4f64(,, ptr, i64, i64) -declare void 
@llvm.riscv.vssseg2.mask.nxv4f64(,, ptr, i64, , i64) -define void @test_vssseg2_nxv4f64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv4f64: +define void @test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv4f64( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg2_mask_nxv4f64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv4f64: +define void @test_vssseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv4f64( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg2.nxv1f64(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv1f64(,, ptr, i64, , i64) -define void @test_vssseg2_nxv1f64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv1f64: +define void @test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv1f64( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg2_mask_nxv1f64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv1f64: +define void @test_vssseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv1f64( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void 
@llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg3.nxv1f64(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv1f64(,,, ptr, i64, , i64) -define void @test_vssseg3_nxv1f64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv1f64: +define void @test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv1f64( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg3_mask_nxv1f64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv1f64: +define void @test_vssseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv1f64( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg4.nxv1f64(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv1f64(,,,, ptr, i64, , i64) -define void @test_vssseg4_nxv1f64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv1f64: +define void @test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv1f64( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg4_mask_nxv1f64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv1f64: +define void @test_vssseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv1f64( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg5.nxv1f64(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv1f64(,,,,, ptr, i64, , i64) -define void @test_vssseg5_nxv1f64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv1f64: +define void @test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv1f64( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg5_mask_nxv1f64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv1f64: +define void @test_vssseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv1f64( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg6.nxv1f64(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv1f64(,,,,,, ptr, i64, , i64) -define void @test_vssseg6_nxv1f64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv1f64: +define void @test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail 
call void @llvm.riscv.vssseg6.nxv1f64( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg6_mask_nxv1f64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv1f64: +define void @test_vssseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv1f64( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg7.nxv1f64(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv1f64(,,,,,,, ptr, i64, , i64) -define void @test_vssseg7_nxv1f64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv1f64: +define void @test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg7e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv1f64( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg7_mask_nxv1f64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv1f64: +define void @test_vssseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, 
%mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg8.nxv1f64(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv1f64(,,,,,,,, ptr, i64, , i64) -define void @test_vssseg8_nxv1f64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv1f64: +define void @test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg8_mask_nxv1f64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv1f64: +define void @test_vssseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg2.nxv2f32(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv2f32(,, ptr, i64, , i64) -define void @test_vssseg2_nxv2f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv2f32: +define void @test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv2f32( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg2_mask_nxv2f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv2f32: +define void @test_vssseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, 
%mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv2f32( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg3.nxv2f32(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv2f32(,,, ptr, i64, , i64) -define void @test_vssseg3_nxv2f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv2f32: +define void @test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv2f32( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg3_mask_nxv2f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv2f32: +define void @test_vssseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv2f32( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg4.nxv2f32(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv2f32(,,,, ptr, i64, , i64) -define void @test_vssseg4_nxv2f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv2f32: +define void @test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv2f32( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, 
i64 %vl, i64 5) ret void } -define void @test_vssseg4_mask_nxv2f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv2f32: +define void @test_vssseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv2f32( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg5.nxv2f32(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv2f32(,,,,, ptr, i64, , i64) -define void @test_vssseg5_nxv2f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv2f32: +define void @test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv2f32( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg5_mask_nxv2f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv2f32: +define void @test_vssseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv2f32( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) + ret void +} + + +define void @test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 5) + ret void +} + +define void @test_vssseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) 
%val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg6.nxv2f32(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv2f32(,,,,,, ptr, i64, , i64) -define void @test_vssseg6_nxv2f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv2f32: +define void @test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv2f32( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg6_mask_nxv2f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv2f32: +define void @test_vssseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv2f32( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg7.nxv2f32(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv2f32(,,,,,,, ptr, i64, , i64) -define void @test_vssseg7_nxv2f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv2f32: +define void @test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv2f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } 
-define void @test_vssseg7_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vssseg7_mask_nxv2f32:
+define void @test_vssseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vmv1r.v v14, v8
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.vssseg7.mask.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
   ret void
 }
-declare void @llvm.riscv.vssseg8.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, ptr, i64, i64)
-declare void @llvm.riscv.vssseg8.mask.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, ptr, i64, <vscale x 2 x i1>, i64)
-define void @test_vssseg8_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
-; CHECK-LABEL: test_vssseg8_nxv2f32:
+define void @test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vmv1r.v v14, v8
-; CHECK-NEXT: vmv1r.v v15, v8
 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-NEXT: vssseg8e32.v v8, (a0), a1
+; CHECK-NEXT: vssseg7e32.v v8, (a0), a1
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.vssseg8.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl)
+  tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
   ret void
 }
-define void @test_vssseg8_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vssseg8_mask_nxv2f32:
+define void @test_vssseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vssseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v8
-; CHECK-NEXT: vmv1r.v v14, v8
-; CHECK-NEXT: vmv1r.v v15, v8
 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t
+; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.vssseg8.mask.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
   ret void
 }
-declare void @llvm.riscv.vssseg2.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, ptr, i64, i64)
-declare void @llvm.riscv.vssseg2.mask.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, ptr, i64, <vscale x 1 x i1>, i64)
-define void @test_vssseg2_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
-; CHECK-LABEL: test_vssseg2_nxv1f16:
+define void
@test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv1f16( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg2_mask_nxv1f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv1f16: +define void @test_vssseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv1f16( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vssseg3.nxv1f16(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv1f16(,,, ptr, i64, , i64) -define void @test_vssseg3_nxv1f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv1f16: +define void @test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv1f16( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 5) ret void } -define void @test_vssseg3_mask_nxv1f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv1f16: +define void @test_vssseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv1f16( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, 
i64 5) ret void } -declare void @llvm.riscv.vssseg4.nxv1f16(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv1f16(,,,, ptr, i64, , i64) -define void @test_vssseg4_nxv1f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv1f16: +define void @test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv1f16( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg4_mask_nxv1f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +define void @test_vssseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv1f16( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg5.nxv1f16(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv1f16(,,,,, ptr, i64, , i64) -define void @test_vssseg5_nxv1f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv1f16: +define void @test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv1f16( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg5_mask_nxv1f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv1f16: +define void @test_vssseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv1f16( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg6.nxv1f16(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv1f16(,,,,,, ptr, i64, , i64) -define void @test_vssseg6_nxv1f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv1f16: +define void @test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv1f16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg6_mask_nxv1f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv1f16: +define void @test_vssseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv1f16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg7.nxv1f16(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv1f16(,,,,,,, ptr, i64, , i64) -define void @test_vssseg7_nxv1f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv1f16: +define void @test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; 
CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv1f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg7_mask_nxv1f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv1f16: +define void @test_vssseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg8.nxv1f16(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv1f16(,,,,,,,, ptr, i64, , i64) -define void @test_vssseg8_nxv1f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv1f16: +define void @test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg8_mask_nxv1f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv1f16: +define void @test_vssseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail 
call void @llvm.riscv.vssseg8.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg2.nxv1f32(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv1f32(,, ptr, i64, , i64) -define void @test_vssseg2_nxv1f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv1f32: +define void @test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv1f32( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg2_mask_nxv1f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv1f32: +define void @test_vssseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv1f32( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg3.nxv1f32(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv1f32(,,, ptr, i64, , i64) -define void @test_vssseg3_nxv1f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv1f32: +define void @test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv1f32( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg3_mask_nxv1f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv1f32: +define void @test_vssseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv1f32( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg4.nxv1f32(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv1f32(,,,, ptr, i64, , i64) -define void @test_vssseg4_nxv1f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv1f32: +define void @test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv1f32( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg4_mask_nxv1f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv1f32: +define void @test_vssseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv1f32( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg5.nxv1f32(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv1f32(,,,,, ptr, i64, , i64) -define void @test_vssseg5_nxv1f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv1f32: +define void @test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv1f32( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void 
@llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg5_mask_nxv1f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv1f32: +define void @test_vssseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv1f32( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg6.nxv1f32(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv1f32(,,,,,, ptr, i64, , i64) -define void @test_vssseg6_nxv1f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv1f32: +define void @test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg7e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv1f32( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg6_mask_nxv1f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv1f32: +define void @test_vssseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv1f32( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg7.nxv1f32(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv1f32(,,,,,,, ptr, i64, , i64) -define void @test_vssseg7_nxv1f32( %val, ptr %base, i64 %offset, i64 %vl) 
{ -; CHECK-LABEL: test_vssseg7_nxv1f32: +define void @test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv1f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 6) ret void } -define void @test_vssseg7_mask_nxv1f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv1f32: +define void @test_vssseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vssseg8.nxv1f32(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv1f32(,,,,,,,, ptr, i64, , i64) -define void @test_vssseg8_nxv1f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv1f32: +define void @test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg8_mask_nxv1f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv1f32: +define void @test_vssseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vssseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma -; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg2.nxv8f16(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv8f16(,, ptr, i64, , i64) -define void @test_vssseg2_nxv8f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv8f16: +define void @test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv8f16( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg2_mask_nxv8f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv8f16: +define void @test_vssseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv8f16( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg3.nxv8f16(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv8f16(,,, ptr, i64, , i64) -define void @test_vssseg3_nxv8f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv8f16: +define void @test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv8f16( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void 
@llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg3_mask_nxv8f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv8f16: +define void @test_vssseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv8f16( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg4.nxv8f16(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv8f16(,,,, ptr, i64, , i64) -define void @test_vssseg4_nxv8f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv8f16: +define void @test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv8f16( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg4_mask_nxv8f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv8f16: +define void @test_vssseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv8f16( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg2.nxv8f32(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv8f32(,, ptr, i64, , i64) -define void @test_vssseg2_nxv8f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv8f32: +define void @test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; 
CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv8f32( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg2_mask_nxv8f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv8f32: +define void @test_vssseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv8f32( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg2.nxv2f64(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv2f64(,, ptr, i64, , i64) -define void @test_vssseg2_nxv2f64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv2f64: +define void @test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv2f64( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg2_mask_nxv2f64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv2f64: +define void @test_vssseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv2f64( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg3.nxv2f64(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv2f64(,,, ptr, i64, , i64) -define void @test_vssseg3_nxv2f64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv2f64: +define void 
@test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv2f64( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg3_mask_nxv2f64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv2f64: +define void @test_vssseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv2f64( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg4.nxv2f64(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv2f64(,,,, ptr, i64, , i64) -define void @test_vssseg4_nxv2f64( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv2f64: +define void @test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv2f64( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg4_mask_nxv2f64( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv2f64: +define void @test_vssseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv2f64( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + 
tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg2.nxv4f16(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv4f16(,, ptr, i64, , i64) -define void @test_vssseg2_nxv4f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv4f16: +define void @test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv4f16( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg2_mask_nxv4f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv4f16: +define void @test_vssseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv4f16( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg3.nxv4f16(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv4f16(,,, ptr, i64, , i64) -define void @test_vssseg3_nxv4f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv4f16: +define void @test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv4f16( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg3_mask_nxv4f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv4f16: +define void @test_vssseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, 
ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv4f16( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg4.nxv4f16(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv4f16(,,,, ptr, i64, , i64) -define void @test_vssseg4_nxv4f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv4f16: +define void @test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv4f16( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg4_mask_nxv4f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv4f16: +define void @test_vssseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv4f16( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg5.nxv4f16(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv4f16(,,,,, ptr, i64, , i64) -define void @test_vssseg5_nxv4f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv4f16: +define void @test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv4f16( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg5_mask_nxv4f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: 
test_vssseg5_mask_nxv4f16: +define void @test_vssseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv4f16( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg6.nxv4f16(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv4f16(,,,,,, ptr, i64, , i64) -define void @test_vssseg6_nxv4f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv4f16: +define void @test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv4f16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg6_mask_nxv4f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv4f16: +define void @test_vssseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv4f16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg7.nxv4f16(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv4f16(,,,,,,, ptr, i64, , i64) -define void @test_vssseg7_nxv4f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv4f16: +define void @test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv4f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg7_mask_nxv4f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv4f16: +define void @test_vssseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg8.nxv4f16(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv4f16(,,,,,,,, ptr, i64, , i64) -define void @test_vssseg8_nxv4f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv4f16: +define void @test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg8_mask_nxv4f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv4f16: +define void @test_vssseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: 
vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg2.nxv2f16(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv2f16(,, ptr, i64, , i64) -define void @test_vssseg2_nxv2f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv2f16: +define void @test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv2f16( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg2_mask_nxv2f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv2f16: +define void @test_vssseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv2f16( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg3.nxv2f16(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv2f16(,,, ptr, i64, , i64) -define void @test_vssseg3_nxv2f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv2f16: +define void @test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv2f16( %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg3_mask_nxv2f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: 
test_vssseg3_mask_nxv2f16: +define void @test_vssseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv2f16( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg4.nxv2f16(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv2f16(,,,, ptr, i64, , i64) -define void @test_vssseg4_nxv2f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv2f16: +define void @test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv2f16( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg4_mask_nxv2f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv2f16: +define void @test_vssseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv2f16( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg5.nxv2f16(,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg5.mask.nxv2f16(,,,,, ptr, i64, , i64) -define void @test_vssseg5_nxv2f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg5_nxv2f16: +define void @test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg6e16.v 
v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.nxv2f16( %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg5_mask_nxv2f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg5_mask_nxv2f16: +define void @test_vssseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg5.mask.nxv2f16( %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg6.nxv2f16(,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg6.mask.nxv2f16(,,,,,, ptr, i64, , i64) -define void @test_vssseg6_nxv2f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg6_nxv2f16: +define void @test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.nxv2f16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg6_mask_nxv2f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg6_mask_nxv2f16: +define void @test_vssseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg6.mask.nxv2f16( %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void 
@llvm.riscv.vssseg7.nxv2f16(,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg7.mask.nxv2f16(,,,,,,, ptr, i64, , i64) -define void @test_vssseg7_nxv2f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg7_nxv2f16: +define void @test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.nxv2f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg7_mask_nxv2f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg7_mask_nxv2f16: +define void @test_vssseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg7.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg8.nxv2f16(,,,,,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg8.mask.nxv2f16(,,,,,,,, ptr, i64, , i64) -define void @test_vssseg8_nxv2f16( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg8_nxv2f16: +define void @test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg8_mask_nxv2f16( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg8_mask_nxv2f16: +define void @test_vssseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vssseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma -; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg8.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg2.nxv4f32(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv4f32(,, ptr, i64, , i64) -define void @test_vssseg2_nxv4f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg2_nxv4f32: +define void @test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv4f32( %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg2_mask_nxv4f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg2_mask_nxv4f32: +define void @test_vssseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv4f32( %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg3.nxv4f32(,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg3.mask.nxv4f32(,,, ptr, i64, , i64) -define void @test_vssseg3_nxv4f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg3_nxv4f32: +define void @test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.nxv4f32( %val, %val, %val, ptr %base, i64 %offset, 
i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg3_mask_nxv4f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg3_mask_nxv4f32: +define void @test_vssseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg3.mask.nxv4f32( %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vssseg4.nxv4f32(,,,, ptr, i64, i64) -declare void @llvm.riscv.vssseg4.mask.nxv4f32(,,,, ptr, i64, , i64) -define void @test_vssseg4_nxv4f32( %val, ptr %base, i64 %offset, i64 %vl) { -; CHECK-LABEL: test_vssseg4_nxv4f32: +define void @test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.nxv4f32( %val, %val, %val, %val, ptr %base, i64 %offset, i64 %vl) + tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4) ret void } -define void @test_vssseg4_mask_nxv4f32( %val, ptr %base, i64 %offset, %mask, i64 %vl) { -; CHECK-LABEL: test_vssseg4_mask_nxv4f32: +define void @test_vssseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl, %mask) { +; CHECK-LABEL: test_vssseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg4.mask.nxv4f32( %val, %val, %val, %val, ptr %base, i64 %offset, %mask, i64 %vl) + tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 4) ret void } + diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll index 9848a37b120ab..9119d42ba0aee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll @@ -1,13163 +1,11880 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 
-mattr=+zve64d,+f,+d,+zfh,+zvfh \ +; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv16i16_nxv16i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16: +define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv16i16_nxv16i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i16: +define void @test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv16i16_nxv16i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i8: +define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg2.nxv16i16.nxv16i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv16i16_nxv16i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i8: +define void @test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv16i16_nxv16i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i32: +define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv16i16_nxv16i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i32: +define void @test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8(,, ptr, , i32) -declare void 
@llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i8: +define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i8: +define void @test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i32: +define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i32: +define void 
@test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i16: +define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i16: +define void @test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv1i8_nxv1i8( %val, ptr 
%base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i8: +define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg3_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i8: +define void @test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i32: +define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg3_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i32: +define void @test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i16: +define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg3_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i16: +define void @test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vsuxseg4_nxv1i8_nxv1i8: +define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i8: +define void @test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i32: +define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i32: +define void @test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, 
ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i16: +define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i16: +define void @test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i8: +define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg5_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i8: +define void @test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i32: +define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg5_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i32: +define void @test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i16: +define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg5_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i16: +define void @test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, 
ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i8: +define void @test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg6_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i8: +define void @test_vsuxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i32: +define void @test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg6_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i32: +define void @test_vsuxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i16: +define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg6_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i16: +define void @test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 
3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i8: +define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg7_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i8: +define void @test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32(,,,,,,, ptr, , , i32) +declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i32: +define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg7_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i32: +define void @test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i16: +define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: 
vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg7_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i16: +define void @test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv1i8_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i8: +define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg8_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i8: +define void @test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 
-; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv1i8_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i32: +define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg8_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i32: +define void @test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, ptr, , , i32) 
+declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv1i8_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i16: +define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg8_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i16: +define void @test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv16i8_nxv16i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i16: +define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: 
vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i16: +define void @test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv16i8_nxv16i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i8: +define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i8: +define void @test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } 
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv16i8_nxv16i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i32: +define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i32: +define void @test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv16i8_nxv16i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i16: +define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, 
i32 3) ret void } -define void @test_vsuxseg3_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i16: +define void @test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv16i8_nxv16i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i8: +define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg3_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i8: +define void @test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32(,,, ptr, , , i32) +declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv16i8_nxv16i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i32: +define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg3_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i32: +define void @test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv16i8_nxv16i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i16: +define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12 +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vsuxseg4_mask_nxv16i8_nxv16i16: +define void @test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv16i8_nxv16i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i8: +define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i8: +define void @test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv16i8_nxv16i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i32: +define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i32: +define void @test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i32: +define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i32: +define void 
@test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i8: +define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i8: +define void @test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv2i32_nxv2i16( %val, ptr 
%base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i16: +define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i16: +define void @test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i32: +define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg3_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i32: +define void @test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i8: +define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg3_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i8: +define void @test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i16: +define void 
@test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg3_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i16: +define void @test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i32: +define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i32: +define void @test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i8: +define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i8: +define void @test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), 
ptr, , , i32, i32) -define void @test_vsuxseg4_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i16: +define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i16: +define void @test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i32: +define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg5_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i32 
%vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i32: +define void @test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i8: +define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg5_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i8: +define void @test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void 
@llvm.riscv.vsuxseg5.nxv2i32.nxv2i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i16: +define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg5_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i16: +define void @test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i32: +define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; 
CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg6_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i32: +define void @test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i8: +define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg6_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i8: +define void @test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i16: +define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg6_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i16: +define void @test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i32: +define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg7_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i32: +define void @test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i8: +define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, 
(a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg7_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i8: +define void @test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i16: +define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg7_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i16: +define void @test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: 
vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv2i32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i32: +define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg8_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i32: +define void @test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, ptr, , , i32) +declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv2i32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i8: +define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg8_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i8: +define void @test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv2i32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i16: +define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; 
CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg8_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i16: +define void @test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i16: +define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i16: +define void @test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i8: +define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i8: +define void @test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i32: +define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i32: +define void @test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i16: +define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg3_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i16: +define void @test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i8: +define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg3_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i8: +define void @test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i32: +define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; 
CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg3_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i32: +define void @test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i16: +define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i16: +define void @test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i8: +define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i8: +define void @test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i32: +define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i32: +define void @test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i16: +define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg5_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i16: +define void @test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, 
v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i8: +define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg5_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i8: +define void @test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv4i16_nxv4i32( 
%val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i32: +define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg5_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i32: +define void @test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i16: +define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void 
@test_vsuxseg6_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i16: +define void @test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i8: +define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg6_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i8: +define void @test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i32: +define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg6_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i32: +define void @test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i16: +define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg7_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i16: +define void @test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i8: +define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg7_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i8: +define void 
@test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i32: +define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg7_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i32: +define void @test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv4i16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i16: +define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg8_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i16: +define void @test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv4i16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i8: +define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) 
%val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg8_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i8: +define void @test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv4i16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i32: +define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg8_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i32: +define void @test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i8: +define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i8: +define void @test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i32: +define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i32: +define void @test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i16: +define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16( 
%val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg2_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i16: +define void @test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i8: +define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg3_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i8: +define void @test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void 
@llvm.riscv.vsuxseg3.nxv1i32.nxv1i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i32: +define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg3_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i32: +define void @test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i16: +define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg3_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i16: +define void @test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i8: +define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i8: +define void @test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, 
%mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i32: +define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i32: +define void @test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i16: +define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, 
(a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg4_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i16: +define void @test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i8: +define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg5_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i8: +define void @test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret 
entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i32: +define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg5_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i32: +define void @test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i16: +define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg5_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i16: +define void @test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i8: +define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg6_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i8: +define void @test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, 
i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i32: +define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg6_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i32: +define void @test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16(,,,,,, ptr, , i32) -declare void 
@llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i16: +define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg6_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i16: +define void @test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i8: +define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; 
CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg7_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i8: +define void @test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i32: +define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg7_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i32: +define void @test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i16: +define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg7_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i16: +define void @test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, ptr, , , i32) +declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv1i32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i8: +define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg8_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i8: +define void @test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv1i32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i32: +define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; 
CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 3) ret void } -define void @test_vsuxseg8_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i32: +define void @test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 3) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv1i32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i16: +define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i16: +define void 
@test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv8i16_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i16: +define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i16: +define void @test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8(,, ptr, , , i32) +declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv8i16_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i8: +define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i8: +define void @test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv8i16_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i32: +define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i32: 
+define void @test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv8i16_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i16: +define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i16: +define void @test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", 
, 2), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv8i16_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i8: +define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i8: +define void @test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv8i16_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i32: +define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i32: +define void 
@test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv8i16_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i16: +define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i16: +define void @test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), 
ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv8i16_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i8: +define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i8: +define void @test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv8i16_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i32: +define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12 +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void 
@test_vsuxseg4_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i32: +define void @test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i16: +define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i16: +define void @test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i8: +define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i8: +define void @test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i32: +define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i32: +define void @test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr 
%base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i16: +define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i16: +define void @test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; 
CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i8: +define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i8: +define void @test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i32: +define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i32: +define void @test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i16: +define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i16: +define void @test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv8i8_nxv8i8( 
%val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i8: +define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i8: +define void @test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i32: +define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i32: +define void 
@test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i16: +define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i16: +define void @test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8(,,,,, ptr, , , i32) +declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i8: +define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i8: +define void @test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i32: +define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32( %val, %val, 
%val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i32: +define void @test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i16: +define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i16: +define void @test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i8: +define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i8: +define void @test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i32: +define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) 
%val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i32: +define void @test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i16: +define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vsuxseg7_mask_nxv8i8_nxv8i16: +define void @test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i8: +define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i8: +define void @test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i32: +define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i32: +define void @test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv8i8_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i16: +define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i16: +define void @test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv8i8_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i8: +define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 
%vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i8: +define void @test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv8i8_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i32: +define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i32: +define void @test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, 
ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv8i32_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i16: +define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv8i32_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i16: +define void @test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv8i32_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i8: +define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv8i32_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i8: +define void @test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv8i32_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i32: +define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv8i32_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i32: +define void @test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i16: +define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i16: +define void @test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i8: +define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8( %val, %val, 
ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i8: +define void @test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i32: +define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i32: +define void @test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16(,,, ptr, , , i32) +declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i16: +define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i16: +define void @test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i8: +define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void 
} -define void @test_vsuxseg3_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i8: +define void @test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i32: +define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i32: +define void @test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16(,,,, ptr, , , i32) +declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i16: +define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i16: +define void @test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i8: +define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i8: +define void @test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i32: +define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i32: +define void @test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i16: +define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i16: +define void @test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i8: +define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i8: +define void @test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i32: +define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i32: +define void @test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; 
CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i16: +define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i16: +define void @test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i8: +define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i8: +define void @test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i32: +define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32( %val, %val, %val, %val, 
%val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i32: +define void @test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i16: +define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i16: +define void @test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i8: +define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i8: +define void @test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv4i8_nxv4i32( %val, 
ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i32: +define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i32: +define void @test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv4i8_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i16: +define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr 
%base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i16: +define void @test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv4i8_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i8: +define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i8: +define void @test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; 
CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv4i8_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i32: +define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i32: +define void @test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, 
, i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i8: +define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i8: +define void @test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i32: +define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i32: +define void 
@test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i16: +define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i16: +define void @test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i8: +define void 
@test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i8: +define void @test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i32: +define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i32: +define void @test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, 
e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i16: +define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i16: +define void @test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i8: +define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i8: +define void @test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i32: +define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i32: +define void @test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; 
CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i16: +define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i16: +define void @test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i8: 
+define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i8: +define void @test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i32: +define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i32: 
+define void @test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i16: +define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i16: +define void @test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8(,,,,,, ptr, , , i32) +declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i8: +define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i8: +define void @test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i32: +define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, 
ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i32: +define void @test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i16: +define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i16: +define void @test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - 
tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i8: +define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i8: +define void @test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i32: +define void 
@test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i32: +define void @test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i16: +define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i16: +define void @test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv1i16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i8: +define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i8: +define void @test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 
-; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv1i16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i32: +define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i32: +define void @test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv1i16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i16: +define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg8_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i16: +define void @test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv32i8_nxv32i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i16: +define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16( %val, %val, ptr 
%base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg2_mask_nxv32i8_nxv32i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i16: +define void @test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv32i8_nxv32i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i8: +define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg2_mask_nxv32i8_nxv32i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i8: +define void @test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32(,, ptr, , i32) -declare void 
@llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i32: +define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg2_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i32: +define void @test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i8: +define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg2_mask_nxv2i8_nxv2i8( %val, ptr %base, 
<vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i8:
+define void @test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
   ret void
 }
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16(<vscale x 2 x i8>, <vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i32)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>, <vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i32>, i32, i32)
+declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
-define void @test_vsuxseg2_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i16:
+define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
+  tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
   ret void
 }
-define void @test_vsuxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i16:
+define void @test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
   ret void
 }
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i32)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i8>, i32, i32)
+declare void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i32: +define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg3_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i32: +define void @test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i8: +define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg3_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vsuxseg3_mask_nxv2i8_nxv2i8: +define void @test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i16: +define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg3_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i16: +define void @test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare 
void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i32: +define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg4_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i32: +define void @test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i8: +define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void 
@test_vsuxseg4_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i8: +define void @test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i16: +define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg4_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i16: +define void @test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32(,,,,, ptr, , i32) 
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i32: +define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg5_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i32: +define void @test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i8: +define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 
; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg5_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i8: +define void @test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i16: +define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg5_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i16: +define void @test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, 
(a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i32: +define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg6_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i32: +define void @test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i8: +define void 
@test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg6_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i8: +define void @test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i16: +define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg6_mask_nxv2i8_nxv2i16( %val, ptr 
%base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i16: +define void @test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i32: +define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg7_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i32: +define void @test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + 
tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i8: +define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg7_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i8: +define void @test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i16: +define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg7_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i16: +define void @test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv2i8_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i32: +define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg8_mask_nxv2i8_nxv2i32( 
%val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i32: +define void @test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv2i8_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i8: +define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg8_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i8: +define void @test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret 
entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv2i8_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i16: +define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg8_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i16: +define void @test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl) { 
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i32: +define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg2_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i32: +define void @test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i8: +define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg2_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i8: +define void @test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; 
CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i16: +define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg2_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i16: +define void @test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i32: +define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg3_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i32: +define void @test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i8: +define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg3_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i8: +define void @test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, 
a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i16: +define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg3_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i16: +define void @test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i32: +define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; 
CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg4_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i32: +define void @test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i8: +define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg4_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i8: +define void @test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i16: +define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg4_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i16: +define void @test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv2i16_nxv2i32( %val, ptr 
%base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i32: +define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg5_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i32: +define void @test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i8: +define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg5_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, 
i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i8: +define void @test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i16: +define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg5_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i16: +define void @test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void 
@llvm.riscv.vsuxseg6.nxv2i16.nxv2i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i32: +define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg6_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i32: +define void @test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i8: +define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg6_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i8: +define void @test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i16: +define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg6_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i16: +define void @test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, 
v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i32: +define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg7_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i32: +define void @test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8(,,,,,,, ptr, , , i32) +declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i8: +define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg7_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i8: +define void @test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i16: +define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, 
ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg7_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i16: +define void @test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv2i16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i32: +define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg8_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i32: +define void @test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv2i16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i8: +define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg8_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i8: +define void @test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } 
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv2i16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i16: +define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg8_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i16: +define void @test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv4i32_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i16: +define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, 
v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg2_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i16: +define void @test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv4i32_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i8: +define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg2_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i8: +define void @test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8( %val, %val, ptr 
%base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv4i32_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i32: +define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg2_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i32: +define void @test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv4i32_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i16: +define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, 
(a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg3_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i16: +define void @test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv4i32_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i8: +define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg3_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i8: +define void @test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv4i32_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i32: +define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg3_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i32: +define void @test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv4i32_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i16: +define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: 
vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg4_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i16: +define void @test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv4i32_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i8: +define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) ret void } -define void @test_vsuxseg4_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i8: +define void @test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret 
 entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
   ret void
 }

-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i32>, i32)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i32>, i32, i32)
+declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)

-define void @test_vsuxseg4_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i32:
+define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v8
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
+  tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
   ret void
 }

-define void @test_vsuxseg4_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i32:
+define void @test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v8
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
   ret void
 }

-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i16>, i32)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i8>, i32, i32)
+declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)

-define void @test_vsuxseg2_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i16:
+define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:
vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg2_mask_nxv16f16_nxv16i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i16: +define void @test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv16f16_nxv16i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i8: +define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg2_mask_nxv16f16_nxv16i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i8: +define void @test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv16f16_nxv16i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i32: +define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg2_mask_nxv16f16_nxv16i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i32: +define void @test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv4f64_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i16: +define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg2_mask_nxv4f64_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i16: +define void @test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv4f64_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i8: +define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg2_mask_nxv4f64_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i8: +define void @test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } 
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv4f64_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i32: +define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg2_mask_nxv4f64_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i32: +define void @test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i8: +define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) 
%val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg2_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i8: +define void @test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i32: +define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg2_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i32: +define void @test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16(,, ptr, , , i32) +declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i16: +define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg2_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i16: +define void @test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i8: +define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg3_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vsuxseg3_mask_nxv1f64_nxv1i8: +define void @test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i32: +define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg3_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i32: +define void @test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void 
@test_vsuxseg3_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i16: +define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg3_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i16: +define void @test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i8: +define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg4_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i8: +define void @test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i32: +define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg4_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i32: +define void @test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16(,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) +declare void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) -define void @test_vsuxseg4_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i16: +define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg4_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i16: +define void @test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i8: +define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void 
@test_vsuxseg5_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i8: +define void @test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i32: +define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg5_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i32: +define void @test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16(,,,,, ptr, , i32) -declare void 
@llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16(,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg5_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i16: +define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg5_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i16: +define void @test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i8: +define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg6.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg6_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i8: +define void @test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i32: +define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg6_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i32: +define void @test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, 
v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16(,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) -define void @test_vsuxseg6_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i16: +define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg6_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i16: +define void @test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vsuxseg7_nxv1f64_nxv1i8: +define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg7_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i8: +define void @test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i32: +define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg7_mask_nxv1f64_nxv1i32( 
%val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i32: +define void @test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16(,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) -define void @test_vsuxseg7_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i16: +define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg7_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i16: +define void @test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv1f64_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i8: +define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg8_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i8: +define void @test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv1f64_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i32: +define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg8_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i32: +define void @test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) -define void @test_vsuxseg8_nxv1f64_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i16: +define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg8_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vsuxseg8_mask_nxv1f64_nxv1i16: +define void @test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i32: +define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg2_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i32: +define void @test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8(,, ptr, , , i32) +declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i8: +define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg2_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i8: +define void @test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16(,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) -define void @test_vsuxseg2_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i16: +define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg2_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vsuxseg2_mask_nxv2f32_nxv2i16: +define void @test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i32: +define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg3_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i32: +define void @test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i8: +define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg3_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i8: +define void @test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16(,,, ptr, , , i32) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) -define void @test_vsuxseg3_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i16: +define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 6) ret void } -define void @test_vsuxseg3_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: 
test_vsuxseg3_mask_nxv2f32_nxv2i16: +define void @test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 6) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i32: +define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i32: +define void @test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i8: +define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i8: +define void @test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i16: +define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i16: +define void @test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; 
CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i32: +define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i32: +define void @test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i8: +define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg5.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i8: +define void @test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i16: +define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i16: +define void @test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, 
ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i32: +define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i32: +define void @test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i8: +define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, 
i32 4) ret void } -define void @test_vsuxseg6_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i8: +define void @test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i16: +define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i16: +define void @test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32(,,,,,,, ptr, , i32) 
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i32: +define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i32: +define void @test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i8: +define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, 
i32 4) ret void } -define void @test_vsuxseg7_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i8: +define void @test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i16: +define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i16: +define void @test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr 
%base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv2f32_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i32: +define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 -; CHECK-NEXT: ret +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i32: +define void @test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv2f32_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i8: +define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - 
tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i8: +define void @test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv2f32_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i16: +define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i16: +define void @test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; 
CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8(,, ptr, , , i32) -define void @test_vsuxseg2_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i8: +define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i8: +define void @test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32(,, ptr, , , i32) -define void @test_vsuxseg2_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i32: +define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i32: +define void 
@test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16(,, ptr, , , i32) -define void @test_vsuxseg2_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i16: +define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i16: +define void @test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i8: +define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg3.nxv1f16.nxv1i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i8: +define void @test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i32: +define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i32: +define void @test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv1f16_nxv1i16( 
%val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i16: +define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i16: +define void @test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i8: +define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i8: +define void @test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i32: +define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i32: +define void @test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i16: +define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i16: +define void @test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i8: +define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i8: +define void @test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, 
%index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i32: +define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i32: +define void @test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i16: +define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, 
i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i16: +define void @test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i8: +define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i8: +define void @test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i32: +define void 
@test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i32: +define void @test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i16: +define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i16: +define void @test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: 
vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i8: +define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i8: +define void @test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i32: +define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i32: +define void @test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i16: +define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i16: +define void @test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv1f16_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i8: +define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i8: +define void @test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv1f16_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i32: +define void 
@test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i32: +define void @test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv1f16_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i16: +define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, 
%mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i16: +define void @test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8(,, ptr, , , i32) -define void @test_vsuxseg2_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i8: +define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i8: +define void @test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32(,, ptr, , , i32) -define void @test_vsuxseg2_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i32: +define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 
%vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i32: +define void @test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16(,, ptr, , , i32) -define void @test_vsuxseg2_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i16: +define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i16: +define void @test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i8: +define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i8: +define void @test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i32: +define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i32: +define void 
@test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i16: +define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i16: +define void @test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i8: +define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; 
CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i8: +define void @test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i32: +define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i32: +define void @test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, 
%val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i16: +define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i16: +define void @test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i8: +define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret 
void } -define void @test_vsuxseg5_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i8: +define void @test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i32: +define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i32: +define void @test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv1f32_nxv1i16( %val, ptr 
%base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i16: +define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i16: +define void @test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i8: +define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i8: +define void @test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; 
CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i32: +define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i32: +define void @test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i16: +define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr 
%base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i16: +define void @test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i8: +define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i8: +define void @test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i32: +define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i32: +define void @test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i16: +define void 
@test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i16: +define void @test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv1f32_nxv1i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i8: +define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i8: +define void 
@test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv1f32_nxv1i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i32: +define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i32: +define void @test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void 
@llvm.riscv.vsuxseg8.nxv1f32.nxv1i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv1f32_nxv1i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i16: +define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i16: +define void @test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16(,, ptr, , , i32) -define void @test_vsuxseg2_nxv8f16_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i16: +define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void 
@test_vsuxseg2_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i16: +define void @test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8(,, ptr, , , i32) -define void @test_vsuxseg2_nxv8f16_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i8: +define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i8: +define void @test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32(,, ptr, , , i32) -define void @test_vsuxseg2_nxv8f16_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i32: +define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i32: +define void @test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv8f16_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i16: +define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i16: +define void @test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void 
@llvm.riscv.vsuxseg3.nxv8f16.nxv8i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv8f16_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i8: +define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i8: +define void @test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv8f16_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i32: +define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i32: +define void @test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv8f16_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i16: +define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i16: +define void @test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv8f16_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i8: +define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 +; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i8: +define void @test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv8f16_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i32: +define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i32: +define void @test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", 
, 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16(,, ptr, , , i32) -define void @test_vsuxseg2_nxv8f32_nxv8i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i16: +define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv8f32_nxv8i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i16: +define void @test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void 
@test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void 
@test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void 
@test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + 
+define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void 
@test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void 
@test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void @test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 5) + ret void +} + +define void 
@test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 5) + ret void +} + + +define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + 
ret void +} + + +define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void 
@test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void 
@test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 6) + ret void +} + +define void @test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 6) + ret void +} + + +define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void 
@test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, 
v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) + ret void +} + + +define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8(,, ptr, , , i32) -define void @test_vsuxseg2_nxv8f32_nxv8i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i8: +define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv8f32_nxv8i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i8: +define void @test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32(,, ptr, , , i32) -define void @test_vsuxseg2_nxv8f32_nxv8i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i32: +define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv8f32_nxv8i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i32: +define void @test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32(,, ptr, , , i32) -define void @test_vsuxseg2_nxv2f64_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i32: +define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i32: +define void @test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8(,, ptr, , , i32) -define void @test_vsuxseg2_nxv2f64_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i8: +define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i8: +define void @test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16(,, ptr, , , i32) -define void @test_vsuxseg2_nxv2f64_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i16: +define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i16: +define void @test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv2f64_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i32: +define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i32: +define void @test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr 
%base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv2f64_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i8: +define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i8: +define void @test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv2f64_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i16: +define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, 
(a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i16: +define void @test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv2f64_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i32: +define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i32: +define void @test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, 
%index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv2f64_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i8: +define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i8: +define void @test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv2f64_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i16: +define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i16: +define void 
@test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16(,, ptr, , , i32) -define void @test_vsuxseg2_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i16: +define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i16: +define void @test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8(,, ptr, , , i32) -define void @test_vsuxseg2_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i8: +define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 
-; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i8: +define void @test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32(,, ptr, , , i32) -define void @test_vsuxseg2_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i32: +define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i32: +define void @test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16(,,, ptr, , , i32) -define void 
@test_vsuxseg3_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i16: +define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i16: +define void @test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i8: +define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i8: +define void @test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli 
zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i32: +define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i32: +define void @test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i16: +define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, 
i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i16: +define void @test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i8: +define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i8: +define void @test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32(,,,, ptr, , , i32) -define 
void @test_vsuxseg4_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i32: +define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i32: +define void @test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i16: +define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i16: +define void @test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i8: +define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i8: +define void @test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i32: +define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v12, 
(a0), v10 +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i32: +define void @test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i16: +define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i16: +define void @test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) 
+ tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i8: +define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i8: +define void @test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i32: +define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 
%vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i32: +define void @test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i16: +define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i16: +define void @test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i8: +define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i8: +define void @test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i32: +define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg7.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i32: +define void @test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv4f16_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i16: +define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i16: +define void @test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: 
vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv4f16_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i8: +define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i8: +define void @test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv4f16_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i32: +define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; 
CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i32: +define void @test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32(,, ptr, , , i32) -define void @test_vsuxseg2_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i32: +define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i32: +define void @test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call 
void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8(,, ptr, , , i32) -define void @test_vsuxseg2_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i8: +define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i8: +define void @test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16(,, ptr, , , i32) -define void @test_vsuxseg2_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i16: +define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i16: +define void 
@test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i32: +define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i32: +define void @test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i8: +define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i8: +define void @test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i16: +define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i16: +define void @test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, 
%index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i32: +define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i32: +define void @test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i8: +define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i8: +define void @test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i16: +define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i16: +define void @test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i32: +define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i32: +define void @test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i8: +define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i8: +define void @test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + 
tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16(,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16(,,,,, ptr, , , i32) -define void @test_vsuxseg5_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i16: +define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg5_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i16: +define void @test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i32: +define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i32: +define void @test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i8: +define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i8: +define void @test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, 
ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16(,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16(,,,,,, ptr, , , i32) -define void @test_vsuxseg6_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i16: +define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg6_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i16: +define void @test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i32: +define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, 
i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i32: +define void @test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8(,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i8: +define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i8: +define void @test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16(,,,,,,, ptr, , i32) -declare void 
@llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16(,,,,,,, ptr, , , i32) -define void @test_vsuxseg7_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i16: +define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg7_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i16: +define void @test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv2f16_nxv2i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i32: +define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void 
@test_vsuxseg8_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i32: +define void @test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv2f16_nxv2i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i8: +define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i8: +define void @test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16(,,,,,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, ptr, , , i32) -define void @test_vsuxseg8_nxv2f16_nxv2i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i16: +define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg8_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i16: +define void @test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16(,, ptr, , , i32) -define void @test_vsuxseg2_nxv4f32_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i16: +define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16( %val, %val, ptr %base, %index, i32 %vl) + tail call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i16: +define void @test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8(,, ptr, , , i32) -define void @test_vsuxseg2_nxv4f32_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i8: +define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i8: +define void @test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32(,, ptr, , i32) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32(,, ptr, , , i32) -define void @test_vsuxseg2_nxv4f32_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i32: +define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, 
i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32( %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg2_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i32: +define void @test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32( %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv4f32_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i16: +define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i16: +define void @test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i32 
%vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv4f32_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i8: +define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i8: +define void @test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32(,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32(,,, ptr, , , i32) -define void @test_vsuxseg3_nxv4f32_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i32: +define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32( %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg3_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i32: +define void 
@test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv4f32_nxv4i16( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i16: +define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i16: +define void @test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv4f32_nxv4i8( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i8: +define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i8: +define void @test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32(,,,, ptr, , i32) -declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32(,,,, ptr, , , i32) -define void @test_vsuxseg4_nxv4f32_nxv4i32( %val, ptr %base, %index, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i32: +define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, i32 4) ret void } -define void @test_vsuxseg4_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, %mask, i32 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i32: +define void @test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - 
tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i32 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i32 %vl, i32 4) ret void } + diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll index 2b417c58af8a1..82698e6da2abf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll @@ -1,18796 +1,15676 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh \ +; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv16i16_nxv16i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16: +define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg2_mask_nxv16i16_nxv16i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i16: +define void @test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void 
@test_vsuxseg2_nxv16i16_nxv16i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i8: +define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg2_mask_nxv16i16_nxv16i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i8: +define void @test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv16i16_nxv16i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i32: +define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg2_mask_nxv16i16_nxv16i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i32: +define void @test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i32_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i32: +define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg2_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i32: +define void @test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i32_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i8: +define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg2_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i8: +define void @test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i32_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i64: +define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg2_mask_nxv4i32_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i64: +define void @test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i32_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i16: +define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg2_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i16: +define void @test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv4i32_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i32: +define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg3_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i32: +define void @test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv4i32_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i8: +define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg3_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i8: +define void @test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call 
void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv4i32_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i64: +define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg3_mask_nxv4i32_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i64: +define void @test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv4i32_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i16: +define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; 
CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg3_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i16: +define void @test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv4i32_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i32: +define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg4_mask_nxv4i32_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i32: +define void @test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32( 
%val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv4i32_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i8: +define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg4_mask_nxv4i32_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i8: +define void @test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv4i32_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i64: +define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: 
vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg4_mask_nxv4i32_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i64: +define void @test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv4i32_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i16: +define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg4_mask_nxv4i32_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i16: +define void @test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, 
(a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv16i8_nxv16i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i16: +define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg2_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i16: +define void @test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv16i8_nxv16i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i8: +define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, 
m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg2_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i8: +define void @test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv16i8_nxv16i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i32: +define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg2_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i32: +define void @test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16(,,, ptr, , , i64) +declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv16i8_nxv16i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i16: +define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg3_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i16: +define void @test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv16i8_nxv16i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i8: +define void @test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg3_mask_nxv16i8_nxv16i8( %val, ptr %base, 
%index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i8: +define void @test_vsuxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv16i8_nxv16i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i32: +define void @test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg3_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i32: +define void @test_vsuxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) 
+declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv16i8_nxv16i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i16: +define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg4_mask_nxv16i8_nxv16i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i16: +define void @test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv16i8_nxv16i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i8: +define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) ret void } 
-define void @test_vsuxseg4_mask_nxv16i8_nxv16i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i8: +define void @test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv16i8_nxv16i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i32: +define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg4_mask_nxv16i8_nxv16i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i32: +define void @test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64(,, ptr, , i64) -declare void 
@llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i64: +define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg2_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i64: +define void @test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i32: +define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void 
@test_vsuxseg2_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i32: +define void @test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i16: +define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg2_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i16: +define void @test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i8: +define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) ret void } -define void @test_vsuxseg2_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i8: +define void @test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i64: +define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, 
mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 
%vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, 
i64 3) + ret void +} + +define void @test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) + +define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void 
@test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4) %val, 
ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, 
%index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) + +define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 
5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void 
@test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr 
%base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, 
%index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) + +define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, 
%mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) + +define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void 
@test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, 
, , i64, i64) + +define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 +; CHECK-NEXT: ret +entry: + tail 
call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) + +define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void 
@test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 
8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) + +define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 3) + ret void +} + +define void @test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 3) + ret void +} + +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) + +define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) + ret void +} + +declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) + +define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) + +define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) + ret void +} + +define void @test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) + +define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) + ret void +} + 
+define void @test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) + ret void +} + +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) + +define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i64: +define void @test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i32: +define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i32: +define void @test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i16: +define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i16: +define void @test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i8: +define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i8: +define void @test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i64: +define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, 
e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i64: +define void @test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i32: +define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i32: +define void @test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - 
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i16: +define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i16: +define void @test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i8: +define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i8: +define void @test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i64: +define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i64: +define void @test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i32: +define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i32: +define void @test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void 
@test_vsuxseg5_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i16: +define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i16: +define void @test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i8: +define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void 
@test_vsuxseg5_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i8: +define void @test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i64: +define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i64: +define void @test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i32: +define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i32: +define void @test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i16: +define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i16: +define void @test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i8: +define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i8: +define void @test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 
3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i64: +define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i64: +define void @test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare 
void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i32: +define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i32: +define void @test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i16: +define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i16: +define void @test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i8: +define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i8: +define void @test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv1i64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i64: +define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1i64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i64: +define void @test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv1i64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i32: +define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1i64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i32: +define void @test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv1i64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i16: +define void 
@test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1i64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i16: +define void @test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv1i64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i8: +define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, 
i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1i64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i8: +define void @test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i64: +define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i64: +define void @test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i32: +define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i32: +define void @test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i16: +define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg2.nxv1i32.nxv1i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i16: +define void @test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i8: +define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i8: +define void @test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void 
@llvm.riscv.vsuxseg3.nxv1i32.nxv1i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i64: +define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i64: +define void @test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i32: +define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32( %val, %val, %val, ptr %base, %index, i64 %vl) 
+ tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i32: +define void @test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i16: +define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i16: +define void @test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) 
ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i8: +define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i8: +define void @test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i64: +define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64( %val, %val, 
%val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i64: +define void @test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i32: +define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i32: +define void @test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i16: +define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i16: +define void @test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i8: +define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i8: +define void @test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i64: +define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i64: +define void @test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: 
vsuxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i32: +define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i32: +define void @test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i16: +define void 
@test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i16: +define void @test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i8: +define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i8: +define void 
@test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i64: +define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i64: +define void @test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void 
@llvm.riscv.vsuxseg6.nxv1i32.nxv1i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i32: +define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i32: +define void @test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i16: +define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, 
v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i16: +define void @test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i8: +define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i8: +define void @test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, 
v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i64: +define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i64: +define void @test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32(,,,,,,, ptr, , , i64) +declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i32: +define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i32: +define void @test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i16: +define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli 
zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i16: +define void @test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i8: +define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i8: +define void @test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv1i32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i64: +define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1i32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i64: +define void @test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void 
@llvm.riscv.vsuxseg8.nxv1i32.nxv1i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv1i32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i32: +define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1i32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i32: +define void @test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv1i32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i16: +define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1i32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i16: +define void @test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv1i32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i8: +define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1i32_nxv1i8( %val, ptr %base, %index, %mask, 
i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i8: +define void @test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv8i16_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i16: +define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i16: +define void @test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8(,, ptr, , , i64) 
+declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv8i16_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i8: +define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i8: +define void @test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv8i16_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i64: +define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8i16_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: 
test_vsuxseg2_mask_nxv8i16_nxv8i64: +define void @test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv8i16_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i32: +define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i32: +define void @test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv8i16_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i16: +define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i16: +define void @test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv8i16_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i8: +define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i8: +define void @test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv8i16_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i64: +define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv8i16_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i64: +define void @test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv8i16_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i32: +define void 
@test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv8i16_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i32: +define void @test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv8i16_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i16: +define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv8i16_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i16: +define void @test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv8i16_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i8: +define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv8i16_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i8: +define void @test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv8i16_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i64: +define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv8i16_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i64: +define void @test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv8i16_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i32: +define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv8i16_nxv8i32( %val, ptr %base, 
%index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i32: +define void @test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i32: +define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i32: +define void @test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i8: +define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i8: +define void @test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i64: +define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i64: +define void @test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, 
i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i16: +define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i16: +define void @test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i32: +define void 
@test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i32: +define void @test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i8: +define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i8: +define void @test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i64: +define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i64: +define void @test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i16: +define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i16: +define void @test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i32: +define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i32: +define void @test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v 
v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i8: +define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i8: +define void @test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i64: 
+define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i64: +define void @test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i16: +define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i16: +define void @test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i32: +define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i32: +define void @test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i8: +define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i8: +define void @test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i64: +define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i64: +define void @test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i16: +define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i16: +define void @test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 
%vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i32: +define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i32: +define void @test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i8: +define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i8: +define void @test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i64: +define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i64: +define void @test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, 
%mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i16: +define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i16: +define void @test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32(,,,,,,, 
ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i32: +define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i32: +define void @test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i8: +define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i8: +define void @test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i64: +define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i64: +define void @test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i16: +define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i16: +define void @test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, ptr, , 
, i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv4i8_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i32: +define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv4i8_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i32: +define void @test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv4i8_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i8: +define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: 
vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv4i8_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i8: +define void @test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv4i8_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i64: +define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv4i8_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i64: +define void 
@test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv4i8_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i16: +define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv4i8_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i16: +define void @test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, 
%val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i64: +define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i64: +define void @test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i32: +define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i32: +define void @test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i16: +define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i16: +define void @test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } 
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8(,, ptr, , i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8(,, ptr, , , i64)
+declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64)
+declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64)

-define void @test_vsuxseg2_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i8:
+define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8( %val, %val, ptr %base, %index, i64 %vl)
+ tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5)
 ret void
 }

-define void @test_vsuxseg2_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i8:
+define void @test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl)
+ tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5)
 ret void
 }

-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64(,,, ptr, , i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64(,,, ptr, , , i64)
+declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64)
+declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64)

-define void @test_vsuxseg3_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i64:
+define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64( %val, %val, %val, ptr %base, %index, i64 %vl)
+ tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5)
 ret void
 }

-define void
@test_vsuxseg3_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i64: +define void @test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i32: +define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i32: +define void @test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16(,,, ptr, , , i64) +declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i16: +define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i16: +define void @test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i8: +define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } 
-define void @test_vsuxseg3_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i8: +define void @test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i64: +define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i64: +define void @test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32(,,,, ptr, , i64) -declare void 
@llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i32: +define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i32: +define void @test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i16: +define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16( %val, %val, 
%val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i16: +define void @test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i8: +define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i8: +define void @test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i64: +define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i64: +define void @test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i32: +define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; 
CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i32: +define void @test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i16: +define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i16: +define void @test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i8: +define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i8: +define void @test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 
2), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i64: +define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i64: +define void @test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i32: +define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i32: +define void @test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i16: +define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i16: +define void @test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i8: +define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i8: +define void @test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv1i16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i64: +define void 
@test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i64: +define void @test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i32: +define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i32: +define void @test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i16: +define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i16: +define void @test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i8: +define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i8: +define void @test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv1i16_nxv1i64( 
%val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i64: +define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv1i16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i64: +define void @test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv1i16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i32: +define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call 
void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv1i16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i32: +define void @test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv1i16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i16: +define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv1i16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i16: +define void @test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv1i16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i8: +define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv1i16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i8: +define void @test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32(,, 
ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i32: +define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i32: +define void @test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i8: +define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, 
%mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i8: +define void @test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i16: +define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i16: +define void @test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i64: +define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i64: +define void @test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i32: +define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i32: +define void @test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) 
%val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i8: +define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i8: +define void @test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void 
@test_vsuxseg3_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i16: +define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i16: +define void @test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i64: +define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i64: +define void @test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i32: +define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i32: +define void @test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i8: +define void 
@test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i8: +define void @test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i16: +define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i16: +define void @test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i64: +define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i64: +define void @test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vsuxseg5_nxv2i32_nxv2i32: +define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i32: +define void @test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i8: +define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: 
test_vsuxseg5_mask_nxv2i32_nxv2i8: +define void @test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i16: +define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i16: +define void @test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void 
@llvm.riscv.vsuxseg5.nxv2i32.nxv2i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i64: +define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i64: +define void @test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i32: +define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v 
v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i32: +define void @test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i8: +define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i8: +define void @test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: 
vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i16: +define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i16: +define void @test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i64: +define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10 +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i64: +define void @test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i32: +define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i32: +define void @test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i8: +define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i8: +define void @test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i16: +define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i16: +define void @test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i64: +define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { 
+; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i64: +define void @test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv2i32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i32: +define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void 
@test_vsuxseg8_mask_nxv2i32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i32: +define void @test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv2i32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i8: +define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv2i32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i8: +define void @test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: 
vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv2i32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i16: +define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv2i32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i16: +define void @test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define 
void @test_vsuxseg8_nxv2i32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i64: +define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10 +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv2i32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i64: +define void @test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i16: +define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv8i8_nxv8i16( %val, ptr 
%base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i16: +define void @test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i8: +define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i8: +define void @test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, 
i64) -define void @test_vsuxseg2_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i64: +define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i64: +define void @test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i32: +define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i32: +define void @test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, 
a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i16: +define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i16: +define void @test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i8: +define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i8: +define void @test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i64: +define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i64: +define void @test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i32: +define void @test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i16: +define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i16: +define void @test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i8: +define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i8: +define void @test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, 
m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i64: +define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i64: +define void @test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i32: +define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i32: +define void @test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i16: +define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i16: +define void @test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i8: +define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i8: +define void @test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define 
void @test_vsuxseg5_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i64: +define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i64: +define void @test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i32: +define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: 
test_vsuxseg5_mask_nxv8i8_nxv8i32: +define void @test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i16: +define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i16: +define void @test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) 
ret void } -declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i8: +define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i8: +define void @test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i64: +define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, 
a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i64: +define void @test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i32: +define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg6_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i32: +define void @test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: 
vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i16: +define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i16: +define void @test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i8: +define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i8: +define void @test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i64: +define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail 
call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i64: +define void @test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i32: +define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i32: +define void @test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv8i8_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i16: +define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg8_mask_nxv8i8_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i16: +define void @test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare 
void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv8i8_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i8: +define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg8_mask_nxv8i8_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i8: +define void @test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv8i8_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i64: +define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg8_mask_nxv8i8_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i64: +define void @test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv8i8_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i32: +define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg8_mask_nxv8i8_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i32: +define void @test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i64_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i32: +define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv4i64_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i32: +define void @test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i64_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i8: +define void 
@test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv4i64_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i8: +define void @test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i64_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i64: +define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv4i64_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i64: +define void @test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64( %val, %val, ptr %base, 
%index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i64_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i16: +define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv4i64_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i16: +define void @test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i32: +define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg2.nxv4i16.nxv4i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i32: +define void @test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i8: +define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i8: +define void @test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64(,, ptr, , i64) -declare void 
@llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i64: +define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i64: +define void @test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i16: +define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; 
CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i16: +define void @test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i32: +define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg3_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i32: +define void @test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i8: +define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg3_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i8: +define void @test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i64: +define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg3_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i64: 
+define void @test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16(,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg3_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i16: +define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg3_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i16: +define void @test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i32: +define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg4_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i32: +define void @test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i8: +define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define 
void @test_vsuxseg4_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i8: +define void @test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64(,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i64: +define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg4_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i64: +define void @test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16(,,,, ptr, , , i64) +declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg4_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i16: +define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg4_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i16: +define void @test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i32: +define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) 
+ tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg5_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i32: +define void @test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i8: +define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg5_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i8: +define void @test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, 
%val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i64: +define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg5_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i64: +define void @test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16(,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg5_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i16: +define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg5_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i16: +define void @test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i32: +define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg6_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i32: +define void @test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i8: +define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg6_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i8: +define void @test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64(,,,,,, ptr, , , i64) 
+declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i64: +define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg6_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i64: +define void @test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16(,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg6_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i16: +define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, 
ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg6_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i16: +define void @test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i32: +define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i32: +define void @test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; 
CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i8: +define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i8: +define void @test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i64: +define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i64: +define void @test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16(,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg7_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i16: +define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: 
vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i16: +define void @test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv4i16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i32: +define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg8_mask_nxv4i16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i32: +define void @test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; 
CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv4i16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i8: +define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg8_mask_nxv4i16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i8: +define void @test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64(,,,,,,,, ptr, , i64) -declare void 
@llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv4i16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i64: +define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg8_mask_nxv4i16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i64: +define void @test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg8_nxv4i16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i16: +define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg8_mask_nxv4i16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i16: +define void @test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i64: +define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i64: +define void @test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i32: +define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i32: +define void @test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16(,, ptr, , , i64) +declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) -define void @test_vsuxseg2_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i16: +define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i16: +define void @test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i8: +define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i8: +define void @test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void 
@llvm.riscv.vsuxseg3.nxv1i8.nxv1i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i64: +define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i64: +define void @test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i32: +define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i32: +define void @test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i16: +define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i16: +define void @test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i8: +define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg3.nxv1i8.nxv1i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i8: +define void @test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i64: +define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i64: +define void @test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32(,,,, 
ptr, , , i64) -define void @test_vsuxseg4_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i32: +define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i32: +define void @test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i16: +define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i16: +define void @test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i8: +define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i8: +define void @test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i64: +define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; 
CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i64: +define void @test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i32: +define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i32: +define void @test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, 
%val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i16: +define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i16: +define void @test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i8: +define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i8: +define void @test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i64: +define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i64: +define void @test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret 
void } -declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i32: +define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i32: +define void @test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i16: +define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void 
@test_vsuxseg6_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i16: +define void @test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i8: +define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i8: +define void @test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64(,,,,,,, ptr, , i64) -declare void 
@llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i64: +define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i64: +define void @test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i32: +define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 
%vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i32: +define void @test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i16: +define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i16: +define void @test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, 
%index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i8: +define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i8: +define void @test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1i8_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i64: +define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail 
call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1i8_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i64: +define void @test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1i8_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i32: +define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1i8_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i32: +define void @test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1i8_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i16: +define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1i8_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i16: +define void @test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1i8_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i8: +define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1i8_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i8: +define void @test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i32: +define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i32: +define void @test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i8: +define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i8: +define void @test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i16: +define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i16: +define void @test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i64: +define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i64: +define void @test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i32: +define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i32: +define void @test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i8: +define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i8: +define void @test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i16: +define void 
@test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i16: +define void @test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i64: +define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i64: +define void @test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; 
CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i32: +define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i32: +define void @test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i8: +define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i8: +define void @test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i16: +define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i16: +define void @test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2i8_nxv2i64( 
%val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i64: +define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i64: +define void @test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i32: +define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i32: +define void @test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i8: +define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i8: +define void @test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i16: +define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i16: +define void @test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i64: +define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i64: +define void @test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: 
ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i32: +define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i32: +define void @test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i8: +define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: 
- tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i8: +define void @test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i16: +define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i16: +define void @test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call 
void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i64: +define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i64: +define void @test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i32: +define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, 
%index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i32: +define void @test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i8: +define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i8: +define void @test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + 
tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i16: +define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i16: +define void @test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i64: +define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg7.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i64: +define void @test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2i8_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i32: +define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv2i8_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i32: +define void @test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v 
v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2i8_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i8: +define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv2i8_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i8: +define void @test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2i8_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i16: +define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; 
CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv2i8_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i16: +define void @test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2i8_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i64: +define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv2i8_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i64: +define void @test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: 
vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv8i32_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i16: +define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8i32_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i16: +define void @test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv8i32_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i8: +define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg2.nxv8i32.nxv8i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8i32_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i8: +define void @test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64(,, ptr, , , i64) -define void @test_vsuxseg2_nxv8i32_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i64: +define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8i32_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i64: +define void @test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv8i32_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i32: +define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) 
%val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8i32_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i32: +define void @test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv32i8_nxv32i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i16: +define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv32i8_nxv32i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i16: +define void @test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv32i8_nxv32i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i8: +define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv32i8_nxv32i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i8: +define void @test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i32: +define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i32: +define void @test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i8: +define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i8: +define void @test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i16: +define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16( %val, %val, ptr %base, %index, 
i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i16: +define void @test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i64: +define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i64: +define void @test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i32: +define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i32: +define void @test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i8: +define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i8: +define void @test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, 
ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i16: +define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i16: +define void @test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i64: +define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i64: 
+define void @test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i32: +define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i32: +define void @test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i8: +define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v 
v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i8: +define void @test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i16: +define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i16: +define void @test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } 
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i64: +define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i64: +define void @test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i32: +define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i32: +define void @test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i8: +define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i8: +define void @test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i16: +define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i16: +define void @test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i64: +define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i64: +define void @test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v 
v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i32: +define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i32: +define void @test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i8: +define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: 
vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i8: +define void @test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i16: +define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i16: +define void @test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t 
; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i64: +define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i64: +define void @test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i32: +define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; 
CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i32: +define void @test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i8: +define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i8: +define void @test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call 
void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i16: +define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i16: +define void @test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i64: +define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10 +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i64: +define void @test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2i16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i32: +define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv2i16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i32: +define void @test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, 
i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2i16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i8: +define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv2i16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i8: +define void @test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2i16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i16: +define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv2i16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i16: +define void @test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2i16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i64: +define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv2i16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i64: +define void @test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2i64_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i32: +define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2i64_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i32: +define void @test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2i64_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i8: +define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", 
, 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2i64_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i8: +define void @test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2i64_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i16: +define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2i64_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i16: +define void @test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2i64_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i64: +define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2i64_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i64: +define void @test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2i64_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i32: +define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2i64_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i32: +define void @test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2i64_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i8: +define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2i64_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i8: +define void @test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2i64_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i16: +define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2i64_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i16: +define void 
@test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2i64_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i64: +define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2i64_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i64: +define void @test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2i64_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i32: +define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; 
CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2i64_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i32: +define void @test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2i64_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i8: +define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2i64_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i8: +define void @test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, ptr 
%base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2i64_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i16: +define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2i64_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i16: +define void @test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2i64_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i64: +define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void 
@test_vsuxseg4_mask_nxv2i64_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i64: +define void @test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv16f16_nxv16i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i16: +define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv16f16_nxv16i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i16: +define void @test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv16f16_nxv16i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i8: +define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv16f16_nxv16i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i8: +define void @test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv16f16_nxv16i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i32: +define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv16f16_nxv16i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i32: +define void @test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, 
%index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv4f64_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i32: +define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv4f64_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i32: +define void @test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv4f64_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i8: +define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv4f64_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i8: +define void @test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; 
CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64(,, ptr, , , i64) -define void @test_vsuxseg2_nxv4f64_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i64: +define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv4f64_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i64: +define void @test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv4f64_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i16: +define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", 
, 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv4f64_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i16: +define void @test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64(,, ptr, , , i64) -define void @test_vsuxseg2_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i64: +define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i64: +define void @test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i32: +define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i32: +define void @test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i16: +define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i16: +define void @test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8(,, ptr, , i64) -declare 
void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i8: +define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i8: +define void @test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i64: +define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i64: +define void @test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i32: +define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i32: +define void @test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i16: +define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i16: +define void @test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i8: +define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i8: +define void @test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i64: +define void 
@test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i64: +define void @test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i32: +define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i32: +define void @test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i16: +define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i16: +define void @test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i8: +define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: 
- tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i8: +define void @test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i64: +define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i64: +define void @test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, 
i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i32: +define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i32: +define void @test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i16: +define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: 
test_vsuxseg5_mask_nxv1f64_nxv1i16: +define void @test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i8: +define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i8: +define void @test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i64: +define void 
@test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i64: +define void @test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i32: +define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i32: +define void @test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i16: +define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i16: +define void @test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i8: +define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i8: +define void @test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i64: +define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i64: +define void @test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i32: +define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i32: +define void @test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i16: +define void 
@test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i16: +define void @test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i8: +define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i8: +define void 
@test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1f64_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i64: +define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv1f64_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i64: +define void @test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32(,,,,,,,, 
ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1f64_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i32: +define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv1f64_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i32: +define void @test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1f64_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i16: +define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv1f64_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i16: +define void @test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1f64_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i8: +define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv1f64_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i8: +define void @test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i32: +define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i32: +define void @test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i8: +define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i8: +define void @test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 
%vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i16: +define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i16: +define void @test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i64: +define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i64( %val, %val, ptr 
%base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i64: +define void @test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i32: +define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i32: +define void @test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i8: +define void 
@test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i8: +define void @test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i16: +define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i16: +define void @test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; 
CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i64: +define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i64: +define void @test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i32: +define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr 
%base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i32: +define void @test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i8: +define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i8: +define void @test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i16: +define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i16: +define void @test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i64: +define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i64: +define void @test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, 
%val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i32: +define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i32: +define void @test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i8: +define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void 
@test_vsuxseg5_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i8: +define void @test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i16: +define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i16: +define void @test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 
%vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i64: +define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg5_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i64: +define void @test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i32: +define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i32: +define void @test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i8: +define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i8: +define void @test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i16: +define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i16: +define void @test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i64: +define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10 +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg6_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i64: +define void @test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v 
v12, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i32: +define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i32: +define void @test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i8: +define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i8: +define void @test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i16: +define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i16: +define void @test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli 
zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i64: +define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg7_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i64: +define void @test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2f32_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i32: +define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; 
CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv2f32_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i32: +define void @test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2f32_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i8: +define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv2f32_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i8: +define void @test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; 
CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2f32_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i16: +define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv2f32_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i16: +define void @test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2f32_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i64: +define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, 
v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10 +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg8_mask_nxv2f32_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i64: +define void @test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64(,, ptr, , , i64) -define void @test_vsuxseg2_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i64: +define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i64: +define void @test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call 
void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i32: +define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i32: +define void @test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i16: +define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i16: +define void 
@test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i8: +define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg2_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i8: +define void @test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i64: +define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i64: +define void @test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i32: +define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i32: +define void @test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16(,,, 
ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i16: +define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i16: +define void @test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i8: +define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg3_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i8: +define void @test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i64: +define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 5) ret void } -define void @test_vsuxseg4_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i64: +define void @test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 5) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i32: +define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, 
ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg4_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i32: +define void @test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i16: +define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg4_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i16: +define void @test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, 
i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i8: +define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg4_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i8: +define void @test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i64: +define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg5_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i64: +define void 
@test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i32: +define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg5_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i32: +define void @test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i16: +define void 
@test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg5_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i16: +define void @test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i8: +define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg5_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i8: +define void @test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i64: +define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg6_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i64: +define void @test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i32: +define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg6_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i32: +define void @test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i16: +define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg6_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i16: +define void @test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; 
CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i8: +define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg6_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i8: +define void @test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i64: +define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i64: +define void @test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i32: +define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i32: +define void @test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v 
v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i16: +define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i16: +define void @test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i8: +define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i8: +define void @test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1f16_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i64: +define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg8_mask_nxv1f16_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i64: +define void @test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1f16_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i32: +define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg8_mask_nxv1f16_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i32: +define void @test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, ptr, , , i64) -define void 
@test_vsuxseg8_nxv1f16_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i16: +define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg8_mask_nxv1f16_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i16: +define void @test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1f16_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i8: +define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) 
%val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg8_mask_nxv1f16_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i8: +define void @test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64(,, ptr, , , i64) -define void @test_vsuxseg2_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i64: +define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i64: +define void @test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vsuxseg2_nxv1f32_nxv1i32: +define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i32: +define void @test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i16: +define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i16: +define void @test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i8: +define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg2_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i8: +define void @test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i64: +define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg3_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i64: +define void 
@test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i32: +define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg3_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i32: +define void @test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i16: +define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; 
CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg3_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i16: +define void @test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i8: +define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg3_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i8: +define void @test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i64: +define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg4_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i64: +define void @test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i32: +define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg4_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, 
i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i32: +define void @test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i16: +define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg4_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i16: +define void @test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i8: +define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, 
i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg4_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i8: +define void @test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i64: +define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg5_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i64: +define void @test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; 
CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i32: +define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg5_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i32: +define void @test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i16: +define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; 
CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg5_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i16: +define void @test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i8: +define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg5_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i8: +define void @test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i64: +define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg6_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i64: +define void @test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i32: +define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg6_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i32: +define void @test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i16: +define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg6_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i16: +define void @test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, 
ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i8: +define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg6_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i8: +define void @test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i64: +define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i64: +define void @test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i32: +define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i32: +define void @test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i16: +define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i16: +define void @test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i8: +define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg7.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg7_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i8: +define void @test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1f32_nxv1i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i64: +define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 6) ret void } -define void @test_vsuxseg8_mask_nxv1f32_nxv1i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i64: +define void @test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, 
(a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 6) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1f32_nxv1i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i32: +define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1f32_nxv1i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i32: +define void @test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1f32_nxv1i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i16: +define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: 
vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1f32_nxv1i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i16: +define void @test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv1f32_nxv1i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i8: +define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv1f32_nxv1i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i8: +define void @test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; 
CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv8f16_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i16: +define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i16: +define void @test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv8f16_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i8: +define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - 
tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i8: +define void @test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64(,, ptr, , , i64) -define void @test_vsuxseg2_nxv8f16_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i64: +define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8f16_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i64: +define void @test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv8f16_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i32: +define void 
@test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i32: +define void @test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv8f16_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i16: +define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i16: +define void @test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv8f16_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i8: +define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i8: +define void @test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv8f16_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i64: +define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv8f16_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: 
test_vsuxseg3_mask_nxv8f16_nxv8i64: +define void @test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv8f16_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i32: +define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i32: +define void @test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv8f16_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i16: +define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; 
CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv8f16_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i16: +define void @test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv8f16_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i8: +define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv8f16_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i8: +define void @test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv8f16_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i64: +define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv8f16_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i64: +define void @test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv8f16_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i32: +define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12 +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv8f16_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i32: +define void @test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv8f32_nxv8i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i16: +define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8f32_nxv8i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i16: +define void @test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv8f32_nxv8i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i8: +define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg2.nxv8f32.nxv8i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8f32_nxv8i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i8: +define void @test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64(,, ptr, , , i64) -define void @test_vsuxseg2_nxv8f32_nxv8i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i64: +define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8f32_nxv8i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i64: +define void @test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv8f32_nxv8i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i32: +define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv8f32_nxv8i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i32: +define void @test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2f64_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i32: +define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i32: +define void @test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, 
%index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2f64_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i8: +define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i8: +define void @test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2f64_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i16: +define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i16: +define void @test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2f64_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i64: +define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2f64_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i64: +define void @test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2f64_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i32: +define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i32: +define void @test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2f64_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i8: +define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i8: +define void @test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2f64_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i16: +define void 
@test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i16: +define void @test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2f64_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i64: +define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2f64_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i64: +define void @test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, 
a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2f64_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i32: +define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2f64_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i32: +define void @test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2f64_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i8: +define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2f64_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i8: +define void @test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2f64_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i16: +define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2f64_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i16: +define void @test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64(,,,, ptr, , , i64) -define void 
@test_vsuxseg4_nxv2f64_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i64: +define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2f64_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i64: +define void @test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i32: +define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i32: +define void @test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: 
vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i8: +define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i8: +define void @test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64(,, ptr, , , i64) -define void @test_vsuxseg2_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i64: +define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv4f16_nxv4i64( %val, ptr 
%base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i64: +define void @test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i16: +define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i16: +define void @test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i32: +define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i32: +define void @test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i8: +define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i8: +define void @test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, 
%mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 +define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i64: +define void @test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i16: +define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i16: +define void @test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i32: +define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i32: +define void @test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i8: +define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, 
a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i8: +define void @test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i64: +define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i64: +define void @test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i16: +define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i16: +define void @test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i32: +define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10 +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { 
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i32: +define void @test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i8: +define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i8: +define void @test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i64: +define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16 +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i64: +define void @test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i16: +define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i16: +define void @test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t 
; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i32: +define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i32: +define void @test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i8: +define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, 
(a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i8: +define void @test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i64: +define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i64: +define void @test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64( %val, %val, 
%val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i16: +define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i16: +define void @test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i32: +define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg7.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i32: +define void @test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i8: +define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i8: +define void @test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i64: +define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i64: +define void @test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i16: +define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i16: +define void @test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv4f16_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i32: +define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv4f16_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i32: +define void @test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, 
m1, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv4f16_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i8: +define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv4f16_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i8: +define void @test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv4f16_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i64: +define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; 
CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv4f16_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i64: +define void @test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vmv1r.v v20, v8 -; CHECK-NEXT: vmv1r.v v21, v8 -; CHECK-NEXT: vmv1r.v v22, v8 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv4f16_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i16: +define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv4f16_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i16: +define void @test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i32: +define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i32: +define void @test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i8: +define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: 
vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i8: +define void @test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i16: +define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i16: +define void @test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i64(,, ptr, , , i64) 
-define void @test_vsuxseg2_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i64: +define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i64: +define void @test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i32: +define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i32: +define void @test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, 
e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i8: +define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i8: +define void @test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i16: +define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 
%vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i16: +define void @test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i64: +define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i64: +define void @test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i32: +define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i32: +define void @test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i8: +define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i8: +define void @test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, ptr %base, 
%index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i16: +define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i16: +define void @test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i64(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i64: +define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i64: +define void 
@test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i32: +define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i32: +define void @test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i8: +define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i8: +define void @test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i16: +define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i16: +define void @test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64(,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i64(,,,,, ptr, , , i64) -define void @test_vsuxseg5_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i64: +define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg5_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i64: +define void @test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i32: +define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 
-; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i32: +define void @test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i8: +define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i8: +define void @test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, 
(a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i16: +define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i16: +define void @test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64(,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i64(,,,,,, ptr, , , i64) -define void @test_vsuxseg6_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i64: +define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; 
CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg6_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i64: +define void @test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i32: +define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i32: +define void @test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, 
%val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i8: +define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i8: +define void @test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i16: +define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr 
%base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i16: +define void @test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64(,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i64(,,,,,,, ptr, , , i64) -define void @test_vsuxseg7_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i64: +define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10 +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg7_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i64: +define void @test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2f16_nxv2i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i32: +define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv2f16_nxv2i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i32: +define void @test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2f16_nxv2i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i8: +define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, 
ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv2f16_nxv2i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i8: +define void @test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2f16_nxv2i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i16: +define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv2f16_nxv2i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i16: +define void @test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, 
mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64(,,,,,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i64(,,,,,,,, ptr, , , i64) -define void @test_vsuxseg8_nxv2f16_nxv2i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i64: +define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg8_mask_nxv2f16_nxv2i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i64: +define void @test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vmv1r.v v16, v8 -; CHECK-NEXT: vmv1r.v v17, v8 -; CHECK-NEXT: vmv1r.v v18, v8 -; CHECK-NEXT: vmv1r.v v19, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32(,, ptr, , , i64) -define void @test_vsuxseg2_nxv4f32_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i32: +define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; 
CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i32: +define void @test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8(,, ptr, , , i64) -define void @test_vsuxseg2_nxv4f32_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i8: +define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i8: +define void @test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void 
@llvm.riscv.vsuxseg2.nxv4f32.nxv4i64(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64(,, ptr, , , i64) -define void @test_vsuxseg2_nxv4f32_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i64: +define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i64( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv4f32_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i64: +define void @test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16(,, ptr, , i64) -declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16(,, ptr, , , i64) -define void @test_vsuxseg2_nxv4f32_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i16: +define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16( %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg2_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i16: +define void @test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: 
vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16( %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv4f32_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i32: +define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i32: +define void @test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv4f32_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i8: +define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i8: +define void @test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv4f32_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i64: +define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv4f32_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i64: +define void @test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16(,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16(,,, ptr, , , i64) -define void @test_vsuxseg3_nxv4f32_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i16: +define 
void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16( %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg3_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i16: +define void @test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv4f32_nxv4i32( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i32: +define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv4f32_nxv4i32( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i32: +define void @test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: 
vsuxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv4f32_nxv4i8( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i8: +define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv4f32_nxv4i8( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i8: +define void @test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv4f32_nxv4i64( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i64: +define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64( %val, 
%val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv4f32_nxv4i64( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i64: +define void @test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } -declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16(,,,, ptr, , i64) -declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16(,,,, ptr, , , i64) -define void @test_vsuxseg4_nxv4f32_nxv4i16( %val, ptr %base, %index, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i16: +define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, i64 4) ret void } -define void @test_vsuxseg4_mask_nxv4f32_nxv4i16( %val, ptr %base, %index, %mask, i64 %vl) { -; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i16: +define void @test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vmv2r.v v16, v8 -; CHECK-NEXT: vmv2r.v v18, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, ptr %base, %index, %mask, i64 %vl) + tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, %mask, i64 %vl, i64 4) ret void } + diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll 
b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll index 0f1e33e47f03e..5818c7979f3c6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll @@ -5,227 +5,207 @@ ; Make sure we don't select a 0 vl to X0 in the custom isel handlers we use ; for these intrinsics. -declare {,} @llvm.riscv.vlseg2.nxv16i16(,, ptr, i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv16i16(,, ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) define @test_vlseg2_mask_nxv16i16(ptr %base, %mask) { ; CHECK-LABEL: test_vlseg2_mask_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16( undef, undef, ptr %base, i64 0) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i16( %1, %1, ptr %base, %mask, i64 0, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 0, i64 4) + %1 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.nxv16i16(target("riscv.vector.tuple", , 2) %0, ptr %base, %mask, i64 0, i64 1, i64 4) + %2 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %2 } -declare {,} @llvm.riscv.vlsseg2.nxv16i16(,, ptr, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv16i16(,, ptr, i64, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) define @test_vlsseg2_mask_nxv16i16(ptr %base, i64 %offset, %mask) { ; CHECK-LABEL: test_vlsseg2_mask_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16( undef, undef, ptr %base, i64 %offset, i64 0) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i16( %1, %1, ptr %base, i64 %offset, %mask, i64 0, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 0, i64 4) + %1 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %0, ptr %base, i64 %offset, %mask, i64 0, i64 1, i64 4) + %2 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + ret %2 } -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(,, ptr, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) define @test_vloxseg2_mask_nxv16i16_nxv16i16(ptr %base, %index, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16( undef, undef, ptr %base, %index, i64 0) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16( %1, %1, ptr %base, %index, %mask, i64 0, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 0, i64 4) + %1 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %0, ptr %base, %index, %mask, i64 0, i64 1, i64 4) + %2 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + ret %2 } -declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(,, ptr, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(,, ptr, , , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) define @test_vluxseg2_mask_nxv16i16_nxv16i16(ptr %base, %index, %mask) { ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16( undef, undef, ptr %base, %index, i64 0) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16( %1, %1, ptr %base, %index, %mask, i64 0, i64 1) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 0, i64 4) + %1 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %0, ptr %base, %index, %mask, i64 0, i64 1, i64 4) + %2 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) + ret %2 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(,, ptr , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, ptr, , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) +declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), 
ptr, , i64, i64, i64) define @test_vlseg2ff_nxv16i16(ptr %base, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma -; CHECK-NEXT: vlseg2e16ff.v v4, (a0) +; CHECK-NEXT: vlseg2e16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a1) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, ptr %base, i64 0) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 0, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 0) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -define @test_vlseg2ff_mask_nxv16i16( %val, ptr %base, %mask, ptr %outvl) { +define @test_vlseg2ff_mask_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t +; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a1) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, ptr %base, %mask, i64 0, i64 1) - %1 = extractvalue {,, i64} %0, 1 - %2 = extractvalue {,, i64} %0, 2 - store i64 %2, ptr %outvl - ret %1 + %0 = tail call {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 0, i64 1, i64 4) + %1 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 0 + %2 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 0) + %3 = extractvalue {target("riscv.vector.tuple", , 2), i64} %0, 1 + store i64 %3, ptr %outvl + ret %2 } -declare void @llvm.riscv.vsseg2.nxv16i16(,, ptr , i64) -declare void @llvm.riscv.vsseg2.mask.nxv16i16(,, ptr, , i64) +declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -define void @test_vsseg2_nxv16i16( %val, ptr %base) { +define void @test_vsseg2_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base) { ; CHECK-LABEL: test_vsseg2_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsseg2.nxv16i16( %val, %val, ptr %base, i64 0) + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 0, i64 4) ret void } -define void @test_vsseg2_mask_nxv16i16( %val, ptr %base, %mask) { +define void @test_vsseg2_mask_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask) { ; CHECK-LABEL: test_vsseg2_mask_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.vsseg2.mask.nxv16i16( %val, %val, ptr %base, %mask, i64 0) + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 0, i64 4) ret void } -declare void @llvm.riscv.vssseg2.nxv16i16(,, ptr, i64, i64) -declare void @llvm.riscv.vssseg2.mask.nxv16i16(,, ptr, i64, , i64) +declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) -define void @test_vssseg2_nxv16i16( %val, ptr %base, i64 %offset) { +define void @test_vssseg2_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset) { ; CHECK-LABEL: test_vssseg2_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.nxv16i16( %val, %val, ptr %base, i64 %offset, i64 0) + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 0, i64 4) ret void } -define void @test_vssseg2_mask_nxv16i16( %val, ptr %base, i64 %offset, %mask) { +define void @test_vssseg2_mask_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask) { ; CHECK-LABEL: test_vssseg2_mask_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vssseg2.mask.nxv16i16( %val, %val, ptr %base, i64 %offset, %mask, i64 0) + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 0, i64 4) ret void } -declare void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16(,, ptr, , i64) -declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16(,, ptr, , , i64) +declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) -define void @test_vsoxseg2_nxv16i16_nxv16i16( %val, ptr %base, %index) { +define void @test_vsoxseg2_nxv16i16_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16( %val, %val, ptr %base, %index, i64 0) + tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 0, i64 4) ret void } -define void @test_vsoxseg2_mask_nxv16i16_nxv16i16( %val, ptr %base, %index, %mask) { +define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask) { ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16( %val, %val, 
ptr %base, %index, %mask, i64 0)
+  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 0, i64 4)
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16(,, ptr, , i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16(,, ptr, , , i64)
+declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64)
+declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , , i64, i64)
 
-define void @test_vsuxseg2_nxv16i16_nxv16i16( %val, ptr %base, %index) {
+define void @test_vsuxseg2_nxv16i16_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetivli zero, 0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16( %val, %val, ptr %base, %index, i64 0)
+  tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 0, i64 4)
   ret void
 }
 
-define void @test_vsuxseg2_mask_nxv16i16_nxv16i16( %val, ptr %base, %index, %mask) {
+define void @test_vsuxseg2_mask_nxv16i16_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetivli zero, 0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16( %val, %val, ptr %base, %index, %mask, i64 0)
+  tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, %mask, i64 0, i64 4)
   ret void
 }
diff --git a/llvm/test/Transforms/InterleavedAccess/RISCV/interleaved-accesses.ll b/llvm/test/Transforms/InterleavedAccess/RISCV/interleaved-accesses.ll
index 66ece62bd74fa..e601ba4191459 100644
--- a/llvm/test/Transforms/InterleavedAccess/RISCV/interleaved-accesses.ll
+++ b/llvm/test/Transforms/InterleavedAccess/RISCV/interleaved-accesses.ll
@@ -44,11 +44,19 @@ define void @load_factor2_as(ptr addrspace(1) %ptr) {
 
 define void @load_factor2_vscale(ptr %ptr) {
 ; RV32-LABEL: @load_factor2_vscale(
-; RV32-NEXT:    [[TMP1:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i32( poison, poison, ptr [[PTR:%.*]], i32 -1)
+; RV32-NEXT:    [[TMP1:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t.i32(target("riscv.vector.tuple", , 2) poison, ptr [[PTR:%.*]], i32 -1, i32 5)
+; RV32-NEXT:    [[TMP2:%.*]] = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) [[TMP1]], i32 0)
+; RV32-NEXT:    [[TMP3:%.*]] = insertvalue { , } poison, [[TMP2]], 0
+; RV32-NEXT:    [[TMP4:%.*]] = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) [[TMP1]], i32 1)
+; RV32-NEXT:    [[TMP5:%.*]] = insertvalue { , } [[TMP3]], [[TMP4]], 1
 ; RV32-NEXT:    ret void
 ;
 ; RV64-LABEL: @load_factor2_vscale(
-; RV64-NEXT:    [[TMP1:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i64( poison, poison, ptr [[PTR:%.*]], i64 -1)
+; RV64-NEXT:    [[TMP1:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[PTR:%.*]], i64 -1, i64 5)
+; RV64-NEXT:    [[TMP2:%.*]] = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) [[TMP1]], i32 0)
+; RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , } poison, [[TMP2]], 0
+; RV64-NEXT:    [[TMP4:%.*]] = call @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) [[TMP1]], i32 1)
+; RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , } [[TMP3]], [[TMP4]], 1
 ; RV64-NEXT:    ret void
 ;
   %interleaved.vec = load , ptr %ptr
@@ -288,11 +296,15 @@ define void @store_factor2_as(ptr addrspace(1) %ptr, <8 x i8> %v0, <8 x i8> %v1)
 
 define void @store_factor2_vscale(ptr %ptr, %v0, %v1) {
 ; RV32-LABEL: @store_factor2_vscale(
-; RV32-NEXT:    call void @llvm.riscv.vsseg2.nxv8i8.i32( [[V0:%.*]], [[V1:%.*]], ptr [[PTR:%.*]], i32 -1)
+; RV32-NEXT:    [[TMP1:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) poison, [[V0:%.*]], i32 0)
+; RV32-NEXT:    [[TMP2:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) [[TMP1]], [[V1:%.*]], i32 1)
+; RV32-NEXT:    call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t.i32(target("riscv.vector.tuple", , 2) [[TMP2]], ptr [[PTR:%.*]], i32 -1, i32 3)
 ; RV32-NEXT:    ret void
 ;
 ; RV64-LABEL: @store_factor2_vscale(
-; RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], ptr [[PTR:%.*]], i64 -1)
+; RV64-NEXT:    [[TMP1:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) poison, [[V0:%.*]], i32 0)
+; RV64-NEXT:    [[TMP2:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2) [[TMP1]], [[V1:%.*]], i32 1)
+; RV64-NEXT:    call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[TMP2]], ptr [[PTR:%.*]], i64 -1, i64 3)
 ; RV64-NEXT:    ret void
 ;
   %interleaved.vec = call @llvm.vector.interleave2.nxv8i8( %v0, %v1)